v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_log_priv.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"

kmem_zone_t	*xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}
	return error;
}
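
/*
 * Editor's note: the undo_log/undo_blocks labels above are an instance of
 * the common kernel "goto unwind" idiom: take resources in order, and on
 * failure jump to the label that releases everything taken so far, in
 * reverse order.  A minimal sketch of the same control flow, using
 * hypothetical helpers take_a/take_b/release_a (illustration only):
 *
 *	error = take_a();
 *	if (error)
 *		return error;
 *	error = take_b();
 *	if (error)
 *		goto undo_a;
 *	return 0;
 * undo_a:
 *	release_a();
 *	return error;
 */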

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_sb_version_haslazysbcount(&mp->m_sb));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_free_space(mp, NULL);
		if (error)
			return error;

		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
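
/*
 * Editor's note, usage sketch (not part of the original source): a caller
 * typically brackets an update between xfs_trans_alloc() and
 * xfs_trans_commit(), cancelling on failure.  The tr_itruncate
 * reservation and the block count of 1 are placeholders chosen for
 * illustration:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 1, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	... join objects and log changes against tp here ...
 *	return xfs_trans_commit(tp);
 */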

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount		*mp,
	struct xfs_trans		**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
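
/*
 * Editor's note, usage sketch (illustration only): an empty transaction
 * is used for read-only metadata walks and, per the comment above, must
 * always end in xfs_trans_cancel():
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	... read buffers via xfs_trans_read_buf() and friends ...
 *	xfs_trans_cancel(tp);
 */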

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
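
/*
 * Editor's note, usage sketch (illustration only): callers pass the
 * superblock field and a signed delta; for example, accounting @len
 * newly allocated blocks decrements the free-block count:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * The delta is only folded into the on-disk superblock at commit time,
 * via xfs_trans_apply_sb_deltas() below.
 */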

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because those changes have already been
 * applied.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
	        blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	mp->m_sb.sb_frextents += rtxdelta;
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
	return;
}
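
/*
 * Editor's note (illustration only): percpu_counter_add_batch() keeps
 * updates cpu-local until a cpu's pending delta reaches the batch size,
 * so with XFS_ICOUNT_BATCH == 128 the +/-64 inode count deltas applied
 * above normally avoid the shared counter lock.  An exact value still
 * requires summing all cpus:
 *
 *	percpu_counter_add_batch(&mp->m_icount, 64, XFS_ICOUNT_BATCH);
 *	exact = percpu_counter_sum(&mp->m_icount);
 */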

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
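
/*
 * Editor's note (illustration only): the function above is the usual
 * accumulate-and-flush batching shape: fill a fixed-size on-stack array,
 * flush it with one locked bulk call when it fills, and flush the
 * remainder after the loop.  In outline, with hypothetical names:
 *
 *	for each item:
 *		batch[i++] = item;
 *		if (i == BATCH_SIZE) {
 *			flush(batch, i);
 *			i = 0;
 *		}
 *	if (i)
 *		flush(batch, i);
 */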

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
			xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
		xfs_defer_cancel(tp);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
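
/*
 * Editor's note, usage sketch (illustration only): a caller holding a
 * permanent log reservation commits a long-running change in chunks by
 * rolling; more_work is a hypothetical loop condition:
 *
 *	while (more_work) {
 *		... log one chunk of changes against tp ...
 *		error = xfs_trans_roll(&tp);
 *		if (error)
 *			break;
 *	}
 */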

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			rblocks / mp->m_sb.sb_rextsize,
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
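
/*
 * Editor's note, usage sketch (illustration only): on success the inode
 * comes back locked, joined and quota-reserved, and the caller drops the
 * ILOCK itself after committing; tr_write and dblocks are placeholders:
 *
 *	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
 *			0, false, &tp);
 *	if (error)
 *		return error;
 *	... modify and log ip ...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */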

/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
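
/*
 * Editor's note, usage sketch (illustration only): for a chown-style
 * change the inode is joined with XFS_ILOCK_EXCL, so commit or cancel
 * releases the lock; apply_ownership_change is a hypothetical helper:
 *
 *	error = xfs_trans_alloc_ichange(ip, udqp, gdqp, pdqp, false, &tp);
 *	if (error)
 *		return error;
 *	apply_ownership_change(tp, ip);
 *	return xfs_trans_commit(tp);
 */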
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   4 * Copyright (C) 2010 Red Hat, Inc.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_fs.h"
   9#include "xfs_shared.h"
  10#include "xfs_format.h"
  11#include "xfs_log_format.h"
  12#include "xfs_log_priv.h"
  13#include "xfs_trans_resv.h"
  14#include "xfs_mount.h"
  15#include "xfs_extent_busy.h"
  16#include "xfs_quota.h"
  17#include "xfs_trans.h"
  18#include "xfs_trans_priv.h"
  19#include "xfs_log.h"
  20#include "xfs_trace.h"
  21#include "xfs_error.h"
  22#include "xfs_defer.h"
 
 
 
 
  23
  24kmem_zone_t	*xfs_trans_zone;
  25
  26#if defined(CONFIG_TRACEPOINTS)
  27static void
  28xfs_trans_trace_reservations(
  29	struct xfs_mount	*mp)
  30{
  31	struct xfs_trans_res	resv;
  32	struct xfs_trans_res	*res;
  33	struct xfs_trans_res	*end_res;
  34	int			i;
  35
  36	res = (struct xfs_trans_res *)M_RES(mp);
  37	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
  38	for (i = 0; res < end_res; i++, res++)
  39		trace_xfs_trans_resv_calc(mp, i, res);
  40	xfs_log_get_max_trans_res(mp, &resv);
  41	trace_xfs_trans_resv_calc(mp, -1, &resv);
  42}
  43#else
  44# define xfs_trans_trace_reservations(mp)
  45#endif
  46
  47/*
  48 * Initialize the precomputed transaction reservation values
  49 * in the mount structure.
  50 */
  51void
  52xfs_trans_init(
  53	struct xfs_mount	*mp)
  54{
  55	xfs_trans_resv_calc(mp, M_RES(mp));
  56	xfs_trans_trace_reservations(mp);
  57}
  58
  59/*
  60 * Free the transaction structure.  If there is more clean up
  61 * to do when the structure is freed, add it here.
  62 */
  63STATIC void
  64xfs_trans_free(
  65	struct xfs_trans	*tp)
  66{
  67	xfs_extent_busy_sort(&tp->t_busy);
  68	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
  69
  70	trace_xfs_trans_free(tp, _RET_IP_);
 
  71	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
  72		sb_end_intwrite(tp->t_mountp->m_super);
  73	xfs_trans_free_dqinfo(tp);
  74	kmem_cache_free(xfs_trans_zone, tp);
  75}
  76
  77/*
  78 * This is called to create a new transaction which will share the
  79 * permanent log reservation of the given transaction.  The remaining
  80 * unused block and rt extent reservations are also inherited.  This
  81 * implies that the original transaction is no longer allowed to allocate
  82 * blocks.  Locks and log items, however, are no inherited.  They must
  83 * be added to the new transaction explicitly.
  84 */
  85STATIC struct xfs_trans *
  86xfs_trans_dup(
  87	struct xfs_trans	*tp)
  88{
  89	struct xfs_trans	*ntp;
  90
  91	trace_xfs_trans_dup(tp, _RET_IP_);
  92
  93	ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
  94
  95	/*
  96	 * Initialize the new transaction structure.
  97	 */
  98	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
  99	ntp->t_mountp = tp->t_mountp;
 100	INIT_LIST_HEAD(&ntp->t_items);
 101	INIT_LIST_HEAD(&ntp->t_busy);
 102	INIT_LIST_HEAD(&ntp->t_dfops);
 103	ntp->t_firstblock = NULLFSBLOCK;
 104
 105	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 106	ASSERT(tp->t_ticket != NULL);
 107
 108	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 109		       (tp->t_flags & XFS_TRANS_RESERVE) |
 110		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
 111		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
 112	/* We gave our writer reference to the new transaction */
 113	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
 114	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 115
 116	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
 117	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 118	tp->t_blk_res = tp->t_blk_res_used;
 119
 120	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 121	tp->t_rtx_res = tp->t_rtx_res_used;
 122	ntp->t_pflags = tp->t_pflags;
 
 123
 124	/* move deferred ops over to the new tp */
 125	xfs_defer_move(ntp, tp);
 126
 127	xfs_trans_dup_dqinfo(tp, ntp);
 128	return ntp;
 129}
 130
 131/*
 132 * This is called to reserve free disk blocks and log space for the
 133 * given transaction.  This must be done before allocating any resources
 134 * within the transaction.
 135 *
 136 * This will return ENOSPC if there are not enough blocks available.
 137 * It will sleep waiting for available log space.
 138 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 139 * is used by long running transactions.  If any one of the reservations
 140 * fails then they will all be backed out.
 141 *
 142 * This does not do quota reservations. That typically is done by the
 143 * caller afterwards.
 144 */
 145static int
 146xfs_trans_reserve(
 147	struct xfs_trans	*tp,
 148	struct xfs_trans_res	*resp,
 149	uint			blocks,
 150	uint			rtextents)
 151{
 152	struct xfs_mount	*mp = tp->t_mountp;
 153	int			error = 0;
 154	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 155
 156	/* Mark this thread as being in a transaction */
 157	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 158
 159	/*
 160	 * Attempt to reserve the needed disk blocks by decrementing
 161	 * the number needed from the number available.  This will
 162	 * fail if the count would go below zero.
 163	 */
 164	if (blocks > 0) {
 165		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
 166		if (error != 0) {
 167			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 168			return -ENOSPC;
 169		}
 170		tp->t_blk_res += blocks;
 171	}
 172
 173	/*
 174	 * Reserve the log space needed for this transaction.
 175	 */
 176	if (resp->tr_logres > 0) {
 177		bool	permanent = false;
 178
 179		ASSERT(tp->t_log_res == 0 ||
 180		       tp->t_log_res == resp->tr_logres);
 181		ASSERT(tp->t_log_count == 0 ||
 182		       tp->t_log_count == resp->tr_logcount);
 183
 184		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 185			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 186			permanent = true;
 187		} else {
 188			ASSERT(tp->t_ticket == NULL);
 189			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 190		}
 191
 192		if (tp->t_ticket != NULL) {
 193			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 194			error = xfs_log_regrant(mp, tp->t_ticket);
 195		} else {
 196			error = xfs_log_reserve(mp,
 197						resp->tr_logres,
 198						resp->tr_logcount,
 199						&tp->t_ticket, XFS_TRANSACTION,
 200						permanent);
 201		}
 202
 203		if (error)
 204			goto undo_blocks;
 205
 206		tp->t_log_res = resp->tr_logres;
 207		tp->t_log_count = resp->tr_logcount;
 208	}
 209
 210	/*
 211	 * Attempt to reserve the needed realtime extents by decrementing
 212	 * the number needed from the number available.  This will
 213	 * fail if the count would go below zero.
 214	 */
 215	if (rtextents > 0) {
 216		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
 217		if (error) {
 218			error = -ENOSPC;
 219			goto undo_log;
 220		}
 221		tp->t_rtx_res += rtextents;
 222	}
 223
 224	return 0;
 225
 226	/*
 227	 * Error cases jump to one of these labels to undo any
 228	 * reservations which have already been performed.
 229	 */
 230undo_log:
 231	if (resp->tr_logres > 0) {
 232		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
 233		tp->t_ticket = NULL;
 234		tp->t_log_res = 0;
 235		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 236	}
 237
 238undo_blocks:
 239	if (blocks > 0) {
 240		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
 241		tp->t_blk_res = 0;
 242	}
 243
 244	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 245
 246	return error;
 247}
 248
 249int
 250xfs_trans_alloc(
 251	struct xfs_mount	*mp,
 252	struct xfs_trans_res	*resp,
 253	uint			blocks,
 254	uint			rtextents,
 255	uint			flags,
 256	struct xfs_trans	**tpp)
 257{
 258	struct xfs_trans	*tp;
 
 259	int			error;
 260
 261	/*
 262	 * Allocate the handle before we do our freeze accounting and setting up
 263	 * GFP_NOFS allocation context so that we avoid lockdep false positives
 264	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
 265	 */
 
 266	tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
 267	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
 268		sb_start_intwrite(mp->m_super);
 
 269
 270	/*
 271	 * Zero-reservation ("empty") transactions can't modify anything, so
 272	 * they're allowed to run while we're frozen.
 273	 */
 274	WARN_ON(resp->tr_logres > 0 &&
 275		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
 276	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
 277	       xfs_sb_version_haslazysbcount(&mp->m_sb));
 278
 279	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
 280	tp->t_flags = flags;
 281	tp->t_mountp = mp;
 282	INIT_LIST_HEAD(&tp->t_items);
 283	INIT_LIST_HEAD(&tp->t_busy);
 284	INIT_LIST_HEAD(&tp->t_dfops);
 285	tp->t_firstblock = NULLFSBLOCK;
 286
 287	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 288	if (error) {
 289		xfs_trans_cancel(tp);
 290		return error;
 291	}
 292
 293	trace_xfs_trans_alloc(tp, _RET_IP_);
 294
 295	*tpp = tp;
 296	return 0;
 297}
 298
 299/*
 300 * Create an empty transaction with no reservation.  This is a defensive
 301 * mechanism for routines that query metadata without actually modifying them --
 302 * if the metadata being queried is somehow cross-linked (think a btree block
 303 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 304 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 305 * notice the corrupt block and the operation will fail back to userspace
 306 * without deadlocking.
 307 *
 308 * Note the zero-length reservation; this transaction MUST be cancelled without
 309 * any dirty data.
 310 *
 311 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 312 * where we can be grabbing buffers at the same time that freeze is trying to
 313 * drain the buffer LRU list.
 314 */
 315int
 316xfs_trans_alloc_empty(
 317	struct xfs_mount		*mp,
 318	struct xfs_trans		**tpp)
 319{
 320	struct xfs_trans_res		resv = {0};
 321
 322	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
 323}
 324
 325/*
 326 * Record the indicated change to the given field for application
 327 * to the file system's superblock when the transaction commits.
 328 * For now, just store the change in the transaction structure.
 329 *
 330 * Mark the transaction structure to indicate that the superblock
 331 * needs to be updated before committing.
 332 *
 333 * Because we may not be keeping track of allocated/free inodes and
 334 * used filesystem blocks in the superblock, we do not mark the
 335 * superblock dirty in this transaction if we modify these fields.
 336 * We still need to update the transaction deltas so that they get
 337 * applied to the incore superblock, but we don't want them to
 338 * cause the superblock to get locked and logged if these are the
 339 * only fields in the superblock that the transaction modifies.
 340 */
 341void
 342xfs_trans_mod_sb(
 343	xfs_trans_t	*tp,
 344	uint		field,
 345	int64_t		delta)
 346{
 347	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 348	xfs_mount_t	*mp = tp->t_mountp;
 349
 350	switch (field) {
 351	case XFS_TRANS_SB_ICOUNT:
 352		tp->t_icount_delta += delta;
 353		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 354			flags &= ~XFS_TRANS_SB_DIRTY;
 355		break;
 356	case XFS_TRANS_SB_IFREE:
 357		tp->t_ifree_delta += delta;
 358		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 359			flags &= ~XFS_TRANS_SB_DIRTY;
 360		break;
 361	case XFS_TRANS_SB_FDBLOCKS:
 362		/*
 363		 * Track the number of blocks allocated in the transaction.
 364		 * Make sure it does not exceed the number reserved. If so,
 365		 * shutdown as this can lead to accounting inconsistency.
 366		 */
 367		if (delta < 0) {
 368			tp->t_blk_res_used += (uint)-delta;
 369			if (tp->t_blk_res_used > tp->t_blk_res)
 370				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 371		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
 372			int64_t	blkres_delta;
 373
 374			/*
 375			 * Return freed blocks directly to the reservation
 376			 * instead of the global pool, being careful not to
 377			 * overflow the trans counter. This is used to preserve
 378			 * reservation across chains of transaction rolls that
 379			 * repeatedly free and allocate blocks.
 380			 */
 381			blkres_delta = min_t(int64_t, delta,
 382					     UINT_MAX - tp->t_blk_res);
 383			tp->t_blk_res += blkres_delta;
 384			delta -= blkres_delta;
 385		}
 386		tp->t_fdblocks_delta += delta;
 387		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 388			flags &= ~XFS_TRANS_SB_DIRTY;
 389		break;
 390	case XFS_TRANS_SB_RES_FDBLOCKS:
 391		/*
 392		 * The allocation has already been applied to the
 393		 * in-core superblock's counter.  This should only
 394		 * be applied to the on-disk superblock.
 395		 */
 396		tp->t_res_fdblocks_delta += delta;
 397		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 398			flags &= ~XFS_TRANS_SB_DIRTY;
 399		break;
 400	case XFS_TRANS_SB_FREXTENTS:
 401		/*
 402		 * Track the number of blocks allocated in the
 403		 * transaction.  Make sure it does not exceed the
 404		 * number reserved.
 405		 */
 406		if (delta < 0) {
 407			tp->t_rtx_res_used += (uint)-delta;
 408			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 409		}
 410		tp->t_frextents_delta += delta;
 411		break;
 412	case XFS_TRANS_SB_RES_FREXTENTS:
 413		/*
 414		 * The allocation has already been applied to the
 415		 * in-core superblock's counter.  This should only
 416		 * be applied to the on-disk superblock.
 417		 */
 418		ASSERT(delta < 0);
 419		tp->t_res_frextents_delta += delta;
 420		break;
 421	case XFS_TRANS_SB_DBLOCKS:
 422		ASSERT(delta > 0);
 423		tp->t_dblocks_delta += delta;
 424		break;
 425	case XFS_TRANS_SB_AGCOUNT:
 426		ASSERT(delta > 0);
 427		tp->t_agcount_delta += delta;
 428		break;
 429	case XFS_TRANS_SB_IMAXPCT:
 430		tp->t_imaxpct_delta += delta;
 431		break;
 432	case XFS_TRANS_SB_REXTSIZE:
 433		tp->t_rextsize_delta += delta;
 434		break;
 435	case XFS_TRANS_SB_RBMBLOCKS:
 436		tp->t_rbmblocks_delta += delta;
 437		break;
 438	case XFS_TRANS_SB_RBLOCKS:
 439		tp->t_rblocks_delta += delta;
 440		break;
 441	case XFS_TRANS_SB_REXTENTS:
 442		tp->t_rextents_delta += delta;
 443		break;
 444	case XFS_TRANS_SB_REXTSLOG:
 445		tp->t_rextslog_delta += delta;
 446		break;
 447	default:
 448		ASSERT(0);
 449		return;
 450	}
 451
 452	tp->t_flags |= flags;
 453}
 454
 455/*
 456 * xfs_trans_apply_sb_deltas() is called from the commit code
 457 * to bring the superblock buffer into the current transaction
 458 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 459 *
 460 * For now we just look at each field allowed to change and change
 461 * it if necessary.
 462 */
 463STATIC void
 464xfs_trans_apply_sb_deltas(
 465	xfs_trans_t	*tp)
 466{
 467	xfs_dsb_t	*sbp;
 468	xfs_buf_t	*bp;
 469	int		whole = 0;
 470
 471	bp = xfs_trans_getsb(tp, tp->t_mountp);
 472	sbp = bp->b_addr;
 473
 474	/*
 475	 * Check that superblock mods match the mods made to AGF counters.
 476	 */
 477	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 478	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 479		tp->t_ag_btree_delta));
 480
 481	/*
 482	 * Only update the superblock counters if we are logging them
 483	 */
 484	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 485		if (tp->t_icount_delta)
 486			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 487		if (tp->t_ifree_delta)
 488			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 489		if (tp->t_fdblocks_delta)
 490			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 491		if (tp->t_res_fdblocks_delta)
 492			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 493	}
 494
 495	if (tp->t_frextents_delta)
 496		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 497	if (tp->t_res_frextents_delta)
 498		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 499
 500	if (tp->t_dblocks_delta) {
 501		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 502		whole = 1;
 503	}
 504	if (tp->t_agcount_delta) {
 505		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 506		whole = 1;
 507	}
 508	if (tp->t_imaxpct_delta) {
 509		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 510		whole = 1;
 511	}
 512	if (tp->t_rextsize_delta) {
 513		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 514		whole = 1;
 515	}
 516	if (tp->t_rbmblocks_delta) {
 517		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 518		whole = 1;
 519	}
 520	if (tp->t_rblocks_delta) {
 521		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 522		whole = 1;
 523	}
 524	if (tp->t_rextents_delta) {
 525		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 526		whole = 1;
 527	}
 528	if (tp->t_rextslog_delta) {
 529		sbp->sb_rextslog += tp->t_rextslog_delta;
 530		whole = 1;
 531	}
 532
 533	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 534	if (whole)
 535		/*
 536		 * Log the whole thing, the fields are noncontiguous.
 537		 */
 538		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 539	else
 540		/*
 541		 * Since all the modifiable fields are contiguous, we
 542		 * can get away with this.
 543		 */
 544		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 545				  offsetof(xfs_dsb_t, sb_frextents) +
 546				  sizeof(sbp->sb_frextents) - 1);
 547}
 548
 549/*
 550 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
 551 * apply superblock counter changes to the in-core superblock.  The
 552 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 553 * applied to the in-core superblock.  The idea is that that has already been
 554 * done.
 555 *
 556 * If we are not logging superblock counters, then the inode allocated/free and
 557 * used block counts are not updated in the on disk superblock. In this case,
 558 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 559 * still need to update the incore superblock with the changes.
 560 *
 561 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 562 * so we don't need to take the counter lock on every update.
 563 */
 564#define XFS_ICOUNT_BATCH	128
 565
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta) {
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);
		if (idelta < 0)
			ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
							XFS_ICOUNT_BATCH) >= 0);
	}

	if (ifreedelta) {
		percpu_counter_add(&mp->m_ifree, ifreedelta);
		if (ifreedelta < 0)
			ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents += rtxdelta;
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
	return;
}
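
/*
 * Worked example for the block accounting above (numbers invented for
 * illustration): a transaction that reserved 16 blocks (t_blk_res == 16)
 * and consumed 10 of them carries t_fdblocks_delta == -10 once lazy
 * superblock counting is in effect, so blkdelta == 16 - 10 == 6 and
 * exactly the six unused blocks are returned to the free pool through
 * xfs_mod_fdblocks().  The net effect since reservation time is the
 * expected -10.
 */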

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call. This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases the amount of AIL lock traffic to set it up and tear it
 * down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
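
/*
 * Design note on the function above (summary, not new mechanism): it is
 * the classic fixed-array batching pattern -- collect up to
 * LOG_ITEM_BATCH_SIZE items, flush each full batch under a single
 * ail_lock acquisition via xfs_log_item_batch_insert(), then flush the
 * remainder after the loop.  A batch of 32 keeps the on-stack array
 * small (32 pointers) while amortising lock round trips across a whole
 * checkpoint's worth of items.
 */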

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically, after the filesystem is
 * marked 'SHUTDOWN', we can't let any new transactions become
 * durable - i.e. committed to disk - because some metadata might be
 * inconsistent. In such cases this returns an error, and the caller
 * may assume that all locked objects joined to the transaction have
 * already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
			xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
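
/*
 * Hedged usage sketch (illustration only; the caller, reservation
 * choice and predicate below are assumed, not taken from this file):
 * commit and cancel are the only two legal ways out of a transaction,
 * so every error path between xfs_trans_alloc() and xfs_trans_commit()
 * must funnel into xfs_trans_cancel().
 */
#if 0
static int example_commit_or_cancel(struct xfs_mount *mp)
{
	struct xfs_trans	*tp;
	int			error;

	/* tr_ichange is merely a plausible reservation for the sketch */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	/* ... join locked objects, make and log modifications ... */

	if (something_went_wrong) {		/* hypothetical predicate */
		xfs_trans_cancel(tp);		/* releases the reservation */
		return -EFSCORRUPTED;
	}

	return xfs_trans_commit(tp);	/* frees tp; don't touch it again */
}
#endif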

/*
 * Unlock all of the transaction's items and free the transaction.  If
 * the transaction is dirty, we must shut down the filesystem because
 * there is no way to restore the items to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
		xfs_defer_cancel(tp);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!(lip->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}
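
/*
 * Aside on the DEBUG check above (interpretation, not taken from this
 * file): an EFD logs completion of a paired extent-free intent (EFI),
 * so one should only ever appear in a transaction that has dirtied the
 * log.  Hitting the assert in a clean cancel would therefore indicate
 * a logic bug in an extent-free sequence rather than normal operation.
 */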

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the chunk
 * we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
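
/*
 * Hedged usage sketch (illustration only; the inode relogging pattern
 * is assumed from common callers, not defined in this file): a long
 * running operation rolls the transaction between bounded steps so the
 * log tail can keep moving, re-joining any object it holds locked
 * across the roll.
 */
#if 0
static int example_roll_loop(struct xfs_trans **tpp, struct xfs_inode *ip)
{
	int	error;

	while (more_work_to_do(ip)) {		/* hypothetical predicate */
		/* ... modify and log one bounded chunk of ip ... */
		xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

		error = xfs_trans_roll(tpp);	/* commits chunk, new tp */
		if (error)
			return error;		/* caller cancels *tpp */
		xfs_trans_ijoin(*tpp, ip, 0);	/* re-join across the roll */
	}
	return 0;
}
#endif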