v4.17
   1/*
   2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   3 * Copyright (C) 2010 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_mount.h"
  26#include "xfs_inode.h"
  27#include "xfs_extent_busy.h"
  28#include "xfs_quota.h"
  29#include "xfs_trans.h"
  30#include "xfs_trans_priv.h"
  31#include "xfs_log.h"
  32#include "xfs_trace.h"
  33#include "xfs_error.h"
  34
  35kmem_zone_t	*xfs_trans_zone;
  36kmem_zone_t	*xfs_log_item_desc_zone;
  37
  38#if defined(CONFIG_TRACEPOINTS)
  39static void
  40xfs_trans_trace_reservations(
  41	struct xfs_mount	*mp)
  42{
  43	struct xfs_trans_res	resv;
  44	struct xfs_trans_res	*res;
  45	struct xfs_trans_res	*end_res;
  46	int			i;
  47
  48	res = (struct xfs_trans_res *)M_RES(mp);
  49	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
  50	for (i = 0; res < end_res; i++, res++)
  51		trace_xfs_trans_resv_calc(mp, i, res);
  52	xfs_log_get_max_trans_res(mp, &resv);
  53	trace_xfs_trans_resv_calc(mp, -1, &resv);
  54}
  55#else
  56# define xfs_trans_trace_reservations(mp)
  57#endif
  58
  59/*
  60 * Initialize the precomputed transaction reservation values
  61 * in the mount structure.
  62 */
  63void
  64xfs_trans_init(
  65	struct xfs_mount	*mp)
  66{
  67	xfs_trans_resv_calc(mp, M_RES(mp));
  68	xfs_trans_trace_reservations(mp);
  69}
  70
  71/*
  72 * Free the transaction structure.  If there is more clean up
  73 * to do when the structure is freed, add it here.
  74 */
  75STATIC void
  76xfs_trans_free(
  77	struct xfs_trans	*tp)
  78{
  79	xfs_extent_busy_sort(&tp->t_busy);
  80	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
  81
  82	atomic_dec(&tp->t_mountp->m_active_trans);
  83	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
  84		sb_end_intwrite(tp->t_mountp->m_super);
  85	xfs_trans_free_dqinfo(tp);
  86	kmem_zone_free(xfs_trans_zone, tp);
  87}
  88
  89/*
  90 * This is called to create a new transaction which will share the
  91 * permanent log reservation of the given transaction.  The remaining
  92 * unused block and rt extent reservations are also inherited.  This
  93 * implies that the original transaction is no longer allowed to allocate
  94 * blocks.  Locks and log items, however, are not inherited.  They must
  95 * be added to the new transaction explicitly.
  96 */
  97STATIC xfs_trans_t *
  98xfs_trans_dup(
  99	xfs_trans_t	*tp)
 100{
 101	xfs_trans_t	*ntp;
 102
 103	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
 104
 105	/*
 106	 * Initialize the new transaction structure.
 107	 */
 108	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
 109	ntp->t_mountp = tp->t_mountp;
 110	INIT_LIST_HEAD(&ntp->t_items);
 111	INIT_LIST_HEAD(&ntp->t_busy);
 112
 113	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 114	ASSERT(tp->t_ticket != NULL);
 115
 116	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 117		       (tp->t_flags & XFS_TRANS_RESERVE) |
 118		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
 119	/* We gave our writer reference to the new transaction */
 120	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
 121	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 122
 123	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
 124	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 125	tp->t_blk_res = tp->t_blk_res_used;
 126
 127	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 128	tp->t_rtx_res = tp->t_rtx_res_used;
 129	ntp->t_pflags = tp->t_pflags;
 130
 131	xfs_trans_dup_dqinfo(tp, ntp);
 132
 133	atomic_inc(&tp->t_mountp->m_active_trans);
 134	return ntp;
 135}
 136
 137/*
 138 * This is called to reserve free disk blocks and log space for the
 139 * given transaction.  This must be done before allocating any resources
 140 * within the transaction.
 141 *
 142 * This will return ENOSPC if there are not enough blocks available.
 143 * It will sleep waiting for available log space.
 144 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 145 * is used by long running transactions.  If any one of the reservations
 146 * fails then they will all be backed out.
 147 *
 148 * This does not do quota reservations. That typically is done by the
 149 * caller afterwards.
 150 */
 151static int
 152xfs_trans_reserve(
 153	struct xfs_trans	*tp,
 154	struct xfs_trans_res	*resp,
 155	uint			blocks,
 156	uint			rtextents)
 157{
 158	int		error = 0;
 159	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 160
 161	/* Mark this thread as being in a transaction */
 162	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 163
 164	/*
 165	 * Attempt to reserve the needed disk blocks by decrementing
 166	 * the number needed from the number available.  This will
 167	 * fail if the count would go below zero.
 168	 */
 169	if (blocks > 0) {
 170		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 171		if (error != 0) {
 172			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 173			return -ENOSPC;
 174		}
 175		tp->t_blk_res += blocks;
 176	}
 177
 178	/*
 179	 * Reserve the log space needed for this transaction.
 180	 */
 181	if (resp->tr_logres > 0) {
 182		bool	permanent = false;
 183
 184		ASSERT(tp->t_log_res == 0 ||
 185		       tp->t_log_res == resp->tr_logres);
 186		ASSERT(tp->t_log_count == 0 ||
 187		       tp->t_log_count == resp->tr_logcount);
 188
 189		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 190			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 191			permanent = true;
 192		} else {
 193			ASSERT(tp->t_ticket == NULL);
 194			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 195		}
 196
 197		if (tp->t_ticket != NULL) {
 198			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 199			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
 200		} else {
 201			error = xfs_log_reserve(tp->t_mountp,
 202						resp->tr_logres,
 203						resp->tr_logcount,
 204						&tp->t_ticket, XFS_TRANSACTION,
 205						permanent);
 206		}
 207
 208		if (error)
 209			goto undo_blocks;
 210
 211		tp->t_log_res = resp->tr_logres;
 212		tp->t_log_count = resp->tr_logcount;
 213	}
 214
 215	/*
 216	 * Attempt to reserve the needed realtime extents by decrementing
 217	 * the number needed from the number available.  This will
 218	 * fail if the count would go below zero.
 219	 */
 220	if (rtextents > 0) {
 221		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 222		if (error) {
 223			error = -ENOSPC;
 224			goto undo_log;
 225		}
 226		tp->t_rtx_res += rtextents;
 227	}
 228
 229	return 0;
 230
 231	/*
 232	 * Error cases jump to one of these labels to undo any
 233	 * reservations which have already been performed.
 234	 */
 235undo_log:
 236	if (resp->tr_logres > 0) {
 237		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
 238		tp->t_ticket = NULL;
 239		tp->t_log_res = 0;
 240		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 241	}
 242
 243undo_blocks:
 244	if (blocks > 0) {
 245		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
 246		tp->t_blk_res = 0;
 247	}
 248
 249	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 250
 251	return error;
 252}
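In this version xfs_trans_reserve() is static, so outside callers reach it through xfs_trans_alloc() below (or indirectly via xfs_trans_roll()). As a minimal, hedged sketch only, with made-up reservation sizes rather than values from the precomputed tables, a permanent (rolling) reservation is requested by setting XFS_TRANS_PERM_LOG_RES in tr_logflags before handing the structure on; mp is assumed to be an existing struct xfs_mount pointer:

	struct xfs_trans_res tres = {
		.tr_logres   = 4096,	/* hypothetical log space per iteration */
		.tr_logcount = 2,	/* hypothetical roll count before re-reserving */
		.tr_logflags = XFS_TRANS_PERM_LOG_RES,
	};
	struct xfs_trans *tp;
	int error;

	error = xfs_trans_alloc(mp, &tres, 16, 0, 0, &tp);	/* 16 data blocks, no rt extents */
	if (error)
		return error;	/* any partial reservation has already been backed out */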
 253
 254int
 255xfs_trans_alloc(
 256	struct xfs_mount	*mp,
 257	struct xfs_trans_res	*resp,
 258	uint			blocks,
 259	uint			rtextents,
 260	uint			flags,
 261	struct xfs_trans	**tpp)
 262{
 263	struct xfs_trans	*tp;
 264	int			error;
 265
 266	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
 267		sb_start_intwrite(mp->m_super);
 268
 269	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
 270	atomic_inc(&mp->m_active_trans);
 271
 272	tp = kmem_zone_zalloc(xfs_trans_zone,
 273		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
 274	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
 275	tp->t_flags = flags;
 276	tp->t_mountp = mp;
 277	INIT_LIST_HEAD(&tp->t_items);
 278	INIT_LIST_HEAD(&tp->t_busy);
 279
 280	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 281	if (error) {
 282		xfs_trans_cancel(tp);
 283		return error;
 284	}
 285
 286	*tpp = tp;
 287	return 0;
 288}
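To make the flow above concrete, here is a hedged sketch of the usual caller pattern built on xfs_trans_alloc(). The inode helpers (xfs_ilock, xfs_trans_ijoin, xfs_trans_log_inode) live in other files and are assumed to keep their usual calling conventions; ip is an assumed struct xfs_inode pointer, and tr_ichange is one of the precomputed reservations filled in by xfs_trans_init() above:

	struct xfs_trans *tp;
	int error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	/* the transaction now owns the ilock */

	/* ... modify the inode core here ... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* unlocks and releases joined objects even when it returns an error */
	return xfs_trans_commit(tp);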
 289
 290/*
 291 * Create an empty transaction with no reservation.  This is a defensive
 292 * mechanism for routines that query metadata without actually modifying
 293 * them -- if the metadata being queried is somehow cross-linked (think a
 294 * btree block pointer that points higher in the tree), we risk deadlock.
 295 * However, blocks grabbed as part of a transaction can be re-grabbed.
 296 * The verifiers will notice the corrupt block and the operation will fail
 297 * back to userspace without deadlocking.
 298 *
 299 * Note the zero-length reservation; this transaction MUST be cancelled
 300 * without any dirty data.
 301 */
 302int
 303xfs_trans_alloc_empty(
 304	struct xfs_mount		*mp,
 305	struct xfs_trans		**tpp)
 306{
 307	struct xfs_trans_res		resv = {0};
 308
 309	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
 310}
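As a small usage sketch (error handling trimmed), an empty transaction brackets a read-only metadata walk and is then cancelled, as the comment above requires:

	struct xfs_trans *tp;
	int error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... walk btrees / read metadata via tp, dirtying nothing ... */

	xfs_trans_cancel(tp);	/* nothing dirty, so this is pure cleanup */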
 311
 312/*
 313 * Record the indicated change to the given field for application
 314 * to the file system's superblock when the transaction commits.
 315 * For now, just store the change in the transaction structure.
 316 *
 317 * Mark the transaction structure to indicate that the superblock
 318 * needs to be updated before committing.
 319 *
 320 * Because we may not be keeping track of allocated/free inodes and
 321 * used filesystem blocks in the superblock, we do not mark the
 322 * superblock dirty in this transaction if we modify these fields.
 323 * We still need to update the transaction deltas so that they get
 324 * applied to the incore superblock, but we don't want them to
 325 * cause the superblock to get locked and logged if these are the
 326 * only fields in the superblock that the transaction modifies.
 327 */
 328void
 329xfs_trans_mod_sb(
 330	xfs_trans_t	*tp,
 331	uint		field,
 332	int64_t		delta)
 333{
 334	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 335	xfs_mount_t	*mp = tp->t_mountp;
 336
 337	switch (field) {
 338	case XFS_TRANS_SB_ICOUNT:
 339		tp->t_icount_delta += delta;
 340		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 341			flags &= ~XFS_TRANS_SB_DIRTY;
 342		break;
 343	case XFS_TRANS_SB_IFREE:
 344		tp->t_ifree_delta += delta;
 345		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 346			flags &= ~XFS_TRANS_SB_DIRTY;
 347		break;
 348	case XFS_TRANS_SB_FDBLOCKS:
 349		/*
 350		 * Track the number of blocks allocated in the transaction.
 351		 * Make sure it does not exceed the number reserved. If so,
 352		 * shutdown as this can lead to accounting inconsistency.
 353		 */
 354		if (delta < 0) {
 355			tp->t_blk_res_used += (uint)-delta;
 356			if (tp->t_blk_res_used > tp->t_blk_res)
 357				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 358		}
 359		tp->t_fdblocks_delta += delta;
 360		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 361			flags &= ~XFS_TRANS_SB_DIRTY;
 362		break;
 363	case XFS_TRANS_SB_RES_FDBLOCKS:
 364		/*
 365		 * The allocation has already been applied to the
 366		 * in-core superblock's counter.  This should only
 367		 * be applied to the on-disk superblock.
 368		 */
 369		tp->t_res_fdblocks_delta += delta;
 370		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 371			flags &= ~XFS_TRANS_SB_DIRTY;
 372		break;
 373	case XFS_TRANS_SB_FREXTENTS:
 374		/*
 375		 * Track the number of blocks allocated in the
 376		 * transaction.  Make sure it does not exceed the
 377		 * number reserved.
 378		 */
 379		if (delta < 0) {
 380			tp->t_rtx_res_used += (uint)-delta;
 381			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 382		}
 383		tp->t_frextents_delta += delta;
 384		break;
 385	case XFS_TRANS_SB_RES_FREXTENTS:
 386		/*
 387		 * The allocation has already been applied to the
 388		 * in-core superblock's counter.  This should only
 389		 * be applied to the on-disk superblock.
 390		 */
 391		ASSERT(delta < 0);
 392		tp->t_res_frextents_delta += delta;
 393		break;
 394	case XFS_TRANS_SB_DBLOCKS:
 395		ASSERT(delta > 0);
 396		tp->t_dblocks_delta += delta;
 397		break;
 398	case XFS_TRANS_SB_AGCOUNT:
 399		ASSERT(delta > 0);
 400		tp->t_agcount_delta += delta;
 401		break;
 402	case XFS_TRANS_SB_IMAXPCT:
 403		tp->t_imaxpct_delta += delta;
 404		break;
 405	case XFS_TRANS_SB_REXTSIZE:
 406		tp->t_rextsize_delta += delta;
 407		break;
 408	case XFS_TRANS_SB_RBMBLOCKS:
 409		tp->t_rbmblocks_delta += delta;
 410		break;
 411	case XFS_TRANS_SB_RBLOCKS:
 412		tp->t_rblocks_delta += delta;
 413		break;
 414	case XFS_TRANS_SB_REXTENTS:
 415		tp->t_rextents_delta += delta;
 416		break;
 417	case XFS_TRANS_SB_REXTSLOG:
 418		tp->t_rextslog_delta += delta;
 419		break;
 420	default:
 421		ASSERT(0);
 422		return;
 423	}
 424
 425	tp->t_flags |= flags;
 426}
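Two illustrative calls (the deltas are placeholders, not values from a real code path): the first is the common case of charging freshly allocated blocks against the transaction's block reservation, the second is a grow-style change that really does dirty the on-disk superblock:

	/* an allocator consumed 8 blocks out of tp->t_blk_res */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -8);

	/* a grow operation added new_dblocks data blocks; new_dblocks is hypothetical */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, new_dblocks);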
 427
 428/*
 429 * xfs_trans_apply_sb_deltas() is called from the commit code
 430 * to bring the superblock buffer into the current transaction
 431 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 432 *
 433 * For now we just look at each field allowed to change and change
 434 * it if necessary.
 435 */
 436STATIC void
 437xfs_trans_apply_sb_deltas(
 438	xfs_trans_t	*tp)
 439{
 440	xfs_dsb_t	*sbp;
 441	xfs_buf_t	*bp;
 442	int		whole = 0;
 443
 444	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
 445	sbp = XFS_BUF_TO_SBP(bp);
 446
 447	/*
 448	 * Check that superblock mods match the mods made to AGF counters.
 449	 */
 450	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 451	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 452		tp->t_ag_btree_delta));
 453
 454	/*
 455	 * Only update the superblock counters if we are logging them
 456	 */
 457	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 458		if (tp->t_icount_delta)
 459			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 460		if (tp->t_ifree_delta)
 461			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 462		if (tp->t_fdblocks_delta)
 463			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 464		if (tp->t_res_fdblocks_delta)
 465			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 466	}
 467
 468	if (tp->t_frextents_delta)
 469		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 470	if (tp->t_res_frextents_delta)
 471		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 472
 473	if (tp->t_dblocks_delta) {
 474		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 475		whole = 1;
 476	}
 477	if (tp->t_agcount_delta) {
 478		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 479		whole = 1;
 480	}
 481	if (tp->t_imaxpct_delta) {
 482		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 483		whole = 1;
 484	}
 485	if (tp->t_rextsize_delta) {
 486		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 487		whole = 1;
 488	}
 489	if (tp->t_rbmblocks_delta) {
 490		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 491		whole = 1;
 492	}
 493	if (tp->t_rblocks_delta) {
 494		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 495		whole = 1;
 496	}
 497	if (tp->t_rextents_delta) {
 498		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 499		whole = 1;
 500	}
 501	if (tp->t_rextslog_delta) {
 502		sbp->sb_rextslog += tp->t_rextslog_delta;
 503		whole = 1;
 504	}
 505
 506	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 507	if (whole)
 508		/*
 509		 * Log the whole thing, the fields are noncontiguous.
 510		 */
 511		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 512	else
 513		/*
 514		 * Since all the modifiable fields are contiguous, we
 515		 * can get away with this.
 516		 */
 517		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 518				  offsetof(xfs_dsb_t, sb_frextents) +
 519				  sizeof(sbp->sb_frextents) - 1);
 520}
 521
 522STATIC int
 523xfs_sb_mod8(
 524	uint8_t			*field,
 525	int8_t			delta)
 526{
 527	int8_t			counter = *field;
 528
 529	counter += delta;
 530	if (counter < 0) {
 531		ASSERT(0);
 532		return -EINVAL;
 533	}
 534	*field = counter;
 535	return 0;
 536}
 537
 538STATIC int
 539xfs_sb_mod32(
 540	uint32_t		*field,
 541	int32_t			delta)
 542{
 543	int32_t			counter = *field;
 544
 545	counter += delta;
 546	if (counter < 0) {
 547		ASSERT(0);
 548		return -EINVAL;
 549	}
 550	*field = counter;
 551	return 0;
 552}
 553
 554STATIC int
 555xfs_sb_mod64(
 556	uint64_t		*field,
 557	int64_t			delta)
 558{
 559	int64_t			counter = *field;
 560
 561	counter += delta;
 562	if (counter < 0) {
 563		ASSERT(0);
 564		return -EINVAL;
 565	}
 566	*field = counter;
 567	return 0;
 568}
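The point of these helpers is simply that they refuse to let a superblock counter go negative, which is what lets xfs_trans_unreserve_and_mod_sb() below unwind cleanly. A trivial illustration with made-up numbers:

	uint64_t icount = 100;
	int error;

	error = xfs_sb_mod64(&icount, -150);	/* returns -EINVAL; icount is left at 100 */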
 569
 570/*
 571 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 572 * and apply superblock counter changes to the in-core superblock.  The
 573 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 574 * applied to the in-core superblock.  The idea is that that has already been
 575 * done.
 576 *
 577 * If we are not logging superblock counters, then the inode allocated/free and
 578 * used block counts are not updated in the on disk superblock. In this case,
 579 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 580 * still need to update the incore superblock with the changes.
 581 */
 582void
 583xfs_trans_unreserve_and_mod_sb(
 584	struct xfs_trans	*tp)
 585{
 586	struct xfs_mount	*mp = tp->t_mountp;
 587	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 588	int64_t			blkdelta = 0;
 589	int64_t			rtxdelta = 0;
 590	int64_t			idelta = 0;
 591	int64_t			ifreedelta = 0;
 592	int			error;
 593
 594	/* calculate deltas */
 595	if (tp->t_blk_res > 0)
 596		blkdelta = tp->t_blk_res;
 597	if ((tp->t_fdblocks_delta != 0) &&
 598	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 599	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 600	        blkdelta += tp->t_fdblocks_delta;
 601
 602	if (tp->t_rtx_res > 0)
 603		rtxdelta = tp->t_rtx_res;
 604	if ((tp->t_frextents_delta != 0) &&
 605	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 606		rtxdelta += tp->t_frextents_delta;
 607
 608	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 609	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 610		idelta = tp->t_icount_delta;
 611		ifreedelta = tp->t_ifree_delta;
 612	}
 613
 614	/* apply the per-cpu counters */
 615	if (blkdelta) {
 616		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 617		if (error)
 618			goto out;
 619	}
 620
 621	if (idelta) {
 622		error = xfs_mod_icount(mp, idelta);
 623		if (error)
 624			goto out_undo_fdblocks;
 625	}
 626
 627	if (ifreedelta) {
 628		error = xfs_mod_ifree(mp, ifreedelta);
 629		if (error)
 630			goto out_undo_icount;
 631	}
 632
 633	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
 634		return;
 635
 636	/* apply remaining deltas */
 637	spin_lock(&mp->m_sb_lock);
 638	if (rtxdelta) {
 639		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
 640		if (error)
 641			goto out_undo_ifree;
 642	}
 643
 644	if (tp->t_dblocks_delta != 0) {
 645		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
 646		if (error)
 647			goto out_undo_frextents;
 648	}
 649	if (tp->t_agcount_delta != 0) {
 650		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 651		if (error)
 652			goto out_undo_dblocks;
 653	}
 654	if (tp->t_imaxpct_delta != 0) {
 655		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
 656		if (error)
 657			goto out_undo_agcount;
 658	}
 659	if (tp->t_rextsize_delta != 0) {
 660		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
 661				     tp->t_rextsize_delta);
 662		if (error)
 663			goto out_undo_imaxpct;
 664	}
 665	if (tp->t_rbmblocks_delta != 0) {
 666		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
 667				     tp->t_rbmblocks_delta);
 668		if (error)
 669			goto out_undo_rextsize;
 670	}
 671	if (tp->t_rblocks_delta != 0) {
 672		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
 673		if (error)
 674			goto out_undo_rbmblocks;
 675	}
 676	if (tp->t_rextents_delta != 0) {
 677		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
 678				     tp->t_rextents_delta);
 679		if (error)
 680			goto out_undo_rblocks;
 681	}
 682	if (tp->t_rextslog_delta != 0) {
 683		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
 684				     tp->t_rextslog_delta);
 685		if (error)
 686			goto out_undo_rextents;
 687	}
 688	spin_unlock(&mp->m_sb_lock);
 689	return;
 690
 691out_undo_rextents:
 692	if (tp->t_rextents_delta)
 693		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
 694out_undo_rblocks:
 695	if (tp->t_rblocks_delta)
 696		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
 697out_undo_rbmblocks:
 698	if (tp->t_rbmblocks_delta)
 699		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
 700out_undo_rextsize:
 701	if (tp->t_rextsize_delta)
 702		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
 703out_undo_imaxpct:
 704	if (tp->t_rextsize_delta)
 705		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
 706out_undo_agcount:
 707	if (tp->t_agcount_delta)
 708		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
 709out_undo_dblocks:
 710	if (tp->t_dblocks_delta)
 711		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
 712out_undo_frextents:
 713	if (rtxdelta)
 714		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
 715out_undo_ifree:
 716	spin_unlock(&mp->m_sb_lock);
 717	if (ifreedelta)
 718		xfs_mod_ifree(mp, -ifreedelta);
 719out_undo_icount:
 720	if (idelta)
 721		xfs_mod_icount(mp, -idelta);
 722out_undo_fdblocks:
 723	if (blkdelta)
 724		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 725out:
 726	ASSERT(error == 0);
 727	return;
 728}
 729
 730/*
 731 * Add the given log item to the transaction's list of log items.
 732 *
 733 * The log item will now point to its new descriptor with its li_desc field.
 734 */
 735void
 736xfs_trans_add_item(
 737	struct xfs_trans	*tp,
 738	struct xfs_log_item	*lip)
 739{
 740	struct xfs_log_item_desc *lidp;
 741
 742	ASSERT(lip->li_mountp == tp->t_mountp);
 743	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 744
 745	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
 746
 747	lidp->lid_item = lip;
 748	lidp->lid_flags = 0;
 749	list_add_tail(&lidp->lid_trans, &tp->t_items);
 750
 751	lip->li_desc = lidp;
 752}
 753
 754STATIC void
 755xfs_trans_free_item_desc(
 756	struct xfs_log_item_desc *lidp)
 757{
 758	list_del_init(&lidp->lid_trans);
 759	kmem_zone_free(xfs_log_item_desc_zone, lidp);
 760}
 761
 762/*
 763 * Unlink and free the given descriptor.
 764 */
 765void
 766xfs_trans_del_item(
 767	struct xfs_log_item	*lip)
 768{
 769	xfs_trans_free_item_desc(lip->li_desc);
 770	lip->li_desc = NULL;
 771}
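For orientation only, a schematic of how these two calls pair up; the item type and variable below are hypothetical, not taken from a real log item implementation:

	struct my_item {				/* hypothetical item type */
		struct xfs_log_item	mi_item;	/* li_mountp and li_ailp already initialised */
		/* ... type-specific state ... */
	};
	struct my_item *mip;				/* assumed to exist and be initialised */

	xfs_trans_add_item(tp, &mip->mi_item);	/* mi_item.li_desc now sits on tp->t_items */
	/* ... later, if the item is backed out before the transaction commits ... */
	xfs_trans_del_item(&mip->mi_item);	/* unlink and free the descriptor again */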
 772
 773/*
 774 * Unlock all of the items of a transaction and free all the descriptors
 775 * of that transaction.
 776 */
 777void
 778xfs_trans_free_items(
 779	struct xfs_trans	*tp,
 780	xfs_lsn_t		commit_lsn,
 781	bool			abort)
 782{
 783	struct xfs_log_item_desc *lidp, *next;
 784
 785	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
 786		struct xfs_log_item	*lip = lidp->lid_item;
 787
 788		lip->li_desc = NULL;
 789
 790		if (commit_lsn != NULLCOMMITLSN)
 791			lip->li_ops->iop_committing(lip, commit_lsn);
 792		if (abort)
 793			lip->li_flags |= XFS_LI_ABORTED;
 794		lip->li_ops->iop_unlock(lip);
 795
 796		xfs_trans_free_item_desc(lidp);
 797	}
 798}
 799
 800static inline void
 801xfs_log_item_batch_insert(
 802	struct xfs_ail		*ailp,
 803	struct xfs_ail_cursor	*cur,
 804	struct xfs_log_item	**log_items,
 805	int			nr_items,
 806	xfs_lsn_t		commit_lsn)
 807{
 808	int	i;
 809
 810	spin_lock(&ailp->ail_lock);
 811	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
 812	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 813
 814	for (i = 0; i < nr_items; i++) {
 815		struct xfs_log_item *lip = log_items[i];
 816
 817		lip->li_ops->iop_unpin(lip, 0);
 818	}
 819}
 820
 821/*
 822 * Bulk operation version of xfs_trans_committed that takes a log vector of
 823 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 824 * minimise lock traffic.
 825 *
 826 * If we are called with the aborted flag set, it is because a log write during
 827 * a CIL checkpoint commit has failed. In this case, all the items in the
 828 * checkpoint have already gone through iop_committed and iop_unlock, which
 829 * means that checkpoint commit abort handling is treated exactly the same
 830 * as an iclog write error even though we haven't started any IO yet. Hence in
 831 * this case all we need to do is iop_committed processing, followed by an
 832 * iop_unpin(aborted) call.
 833 *
 834 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 835 * at the end of the AIL, the insert cursor avoids the need to walk
 836 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 837 * call. This saves a lot of needless list walking and is a net win, even
 838 * though it slightly increases the amount of AIL lock traffic to set it up
 839 * and tear it down.
 840 */
 841void
 842xfs_trans_committed_bulk(
 843	struct xfs_ail		*ailp,
 844	struct xfs_log_vec	*log_vector,
 845	xfs_lsn_t		commit_lsn,
 846	int			aborted)
 847{
 848#define LOG_ITEM_BATCH_SIZE	32
 849	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 850	struct xfs_log_vec	*lv;
 851	struct xfs_ail_cursor	cur;
 852	int			i = 0;
 853
 854	spin_lock(&ailp->ail_lock);
 855	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 856	spin_unlock(&ailp->ail_lock);
 857
 858	/* unpin all the log items */
 859	for (lv = log_vector; lv; lv = lv->lv_next ) {
 860		struct xfs_log_item	*lip = lv->lv_item;
 861		xfs_lsn_t		item_lsn;
 862
 863		if (aborted)
 864			lip->li_flags |= XFS_LI_ABORTED;
 865		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 866
 867		/* item_lsn of -1 means the item needs no further processing */
 868		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 869			continue;
 870
 871		/*
 872		 * if we are aborting the operation, no point in inserting the
 873		 * object into the AIL as we are in a shutdown situation.
 874		 */
 875		if (aborted) {
 876			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
 877			lip->li_ops->iop_unpin(lip, 1);
 878			continue;
 879		}
 880
 881		if (item_lsn != commit_lsn) {
 882
 883			/*
 884			 * Not a bulk update option due to unusual item_lsn.
 885			 * Push into AIL immediately, rechecking the lsn once
 886			 * we have the ail lock. Then unpin the item. This does
 887			 * not affect the AIL cursor the bulk insert path is
 888			 * using.
 889			 */
 890			spin_lock(&ailp->ail_lock);
 891			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 892				xfs_trans_ail_update(ailp, lip, item_lsn);
 893			else
 894				spin_unlock(&ailp->ail_lock);
 895			lip->li_ops->iop_unpin(lip, 0);
 896			continue;
 897		}
 898
 899		/* Item is a candidate for bulk AIL insert.  */
 900		log_items[i++] = lv->lv_item;
 901		if (i >= LOG_ITEM_BATCH_SIZE) {
 902			xfs_log_item_batch_insert(ailp, &cur, log_items,
 903					LOG_ITEM_BATCH_SIZE, commit_lsn);
 904			i = 0;
 905		}
 906	}
 907
 908	/* make sure we insert the remainder! */
 909	if (i)
 910		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 911
 912	spin_lock(&ailp->ail_lock);
 913	xfs_trans_ail_cursor_done(&cur);
 914	spin_unlock(&ailp->ail_lock);
 915}
 916
 917/*
 918 * Commit the given transaction to the log.
 919 *
 920 * XFS disk error handling mechanism is not based on a typical
 921 * transaction abort mechanism. Logically after the filesystem
 922 * gets marked 'SHUTDOWN', we can't let any new transactions
 923 * be durable - ie. committed to disk - because some metadata might
 924 * be inconsistent. In such cases, this returns an error, and the
 925 * caller may assume that all locked objects joined to the transaction
 926 * have already been unlocked as if the commit had succeeded.
 927 * Do not reference the transaction structure after this call.
 928 */
 929static int
 930__xfs_trans_commit(
 931	struct xfs_trans	*tp,
 932	bool			regrant)
 933{
 934	struct xfs_mount	*mp = tp->t_mountp;
 935	xfs_lsn_t		commit_lsn = -1;
 936	int			error = 0;
 937	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 938
 939	/*
 940	 * If there is nothing to be logged by the transaction,
 941	 * then unlock all of the items associated with the
 942	 * transaction and free the transaction structure.
 943	 * Also make sure to return any reserved blocks to
 944	 * the free pool.
 945	 */
 946	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 947		goto out_unreserve;
 948
 949	if (XFS_FORCED_SHUTDOWN(mp)) {
 950		error = -EIO;
 951		goto out_unreserve;
 952	}
 953
 954	ASSERT(tp->t_ticket != NULL);
 955
 956	/*
 957	 * If we need to update the superblock, then do it now.
 958	 */
 959	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
 960		xfs_trans_apply_sb_deltas(tp);
 961	xfs_trans_apply_dquot_deltas(tp);
 962
 963	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
 964
 965	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 966	xfs_trans_free(tp);
 967
 968	/*
 969	 * If the transaction needs to be synchronous, then force the
 970	 * log out now and wait for it.
 971	 */
 972	if (sync) {
 973		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 974		XFS_STATS_INC(mp, xs_trans_sync);
 975	} else {
 976		XFS_STATS_INC(mp, xs_trans_async);
 977	}
 978
 979	return error;
 980
 981out_unreserve:
 982	xfs_trans_unreserve_and_mod_sb(tp);
 983
 984	/*
 985	 * It is indeed possible for the transaction to be not dirty but
 986	 * the dqinfo portion to be.  All that means is that we have some
 987	 * (non-persistent) quota reservations that need to be unreserved.
 988	 */
 989	xfs_trans_unreserve_and_mod_dquots(tp);
 990	if (tp->t_ticket) {
 991		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
 992		if (commit_lsn == -1 && !error)
 993			error = -EIO;
 994	}
 995	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 996	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
 997	xfs_trans_free(tp);
 998
 999	XFS_STATS_INC(mp, xs_trans_empty);
1000	return error;
1001}
1002
1003int
1004xfs_trans_commit(
1005	struct xfs_trans	*tp)
1006{
1007	return __xfs_trans_commit(tp, false);
1008}
1009
1010/*
1011 * Unlock all of the transaction's items and free the transaction.
1012 * The transaction must not have modified any of its items, because
1013 * there is no way to restore them to their previous state.
1014 *
1015 * If the transaction has made a log reservation, make sure to release
1016 * it as well.
1017 */
1018void
1019xfs_trans_cancel(
1020	struct xfs_trans	*tp)
1021{
1022	struct xfs_mount	*mp = tp->t_mountp;
1023	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1024
1025	/*
1026	 * See if the caller is relying on us to shut down the
1027	 * filesystem.  This happens in paths where we detect
1028	 * corruption and decide to give up.
1029	 */
1030	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1031		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1032		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1033	}
1034#ifdef DEBUG
1035	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1036		struct xfs_log_item_desc *lidp;
1037
1038		list_for_each_entry(lidp, &tp->t_items, lid_trans)
1039			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
1040	}
1041#endif
1042	xfs_trans_unreserve_and_mod_sb(tp);
1043	xfs_trans_unreserve_and_mod_dquots(tp);
1044
1045	if (tp->t_ticket)
1046		xfs_log_done(mp, tp->t_ticket, NULL, false);
1047
1048	/* mark this thread as no longer being in a transaction */
1049	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1050
1051	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
1052	xfs_trans_free(tp);
1053}
1054
1055/*
1056 * Roll from one trans in the sequence of PERMANENT transactions to
1057 * the next: permanent transactions are only flushed out when
 1058 * committed with xfs_trans_commit(), but we still want to let chunks
 1059 * of it go to the log as soon as possible. So we commit the
1060 * chunk we've been working on and get a new transaction to continue.
1061 */
1062int
1063xfs_trans_roll(
1064	struct xfs_trans	**tpp)
1065{
1066	struct xfs_trans	*trans = *tpp;
1067	struct xfs_trans_res	tres;
1068	int			error;
1069
1070	/*
1071	 * Copy the critical parameters from one trans to the next.
1072	 */
1073	tres.tr_logres = trans->t_log_res;
1074	tres.tr_logcount = trans->t_log_count;
1075
1076	*tpp = xfs_trans_dup(trans);
1077
1078	/*
1079	 * Commit the current transaction.
1080	 * If this commit failed, then it'd just unlock those items that
1081	 * are not marked ihold. That also means that a filesystem shutdown
1082	 * is in progress. The caller takes the responsibility to cancel
1083	 * the duplicate transaction that gets returned.
1084	 */
1085	error = __xfs_trans_commit(trans, true);
1086	if (error)
1087		return error;
1088
1089	/*
1090	 * Reserve space in the log for the next transaction.
1091	 * This also pushes items in the "AIL", the list of logged items,
1092	 * out to disk if they are taking up space at the tail of the log
1093	 * that we want to use.  This requires that either nothing be locked
1094	 * across this call, or that anything that is locked be logged in
1095	 * the prior and the next transactions.
1096	 */
1097	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1098	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1099}
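A hedged sketch of the caller-side rolling pattern this enables; tr_itruncate stands in for any permanent reservation, the loop condition is purely illustrative, and mp/tp/error are assumed to be declared by the caller:

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	while (more_extents_to_free) {		/* illustrative condition */
		/* ... dirty a bounded chunk of metadata in tp ... */

		error = xfs_trans_roll(&tp);	/* commit the chunk, keep the permanent reservation */
		if (error)
			goto out_cancel;	/* tp is now the duplicate; we must cancel it */
	}

	return xfs_trans_commit(tp);

out_cancel:
	xfs_trans_cancel(tp);
	return error;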
v3.15
   1/*
   2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   3 * Copyright (C) 2010 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_mount.h"
  28#include "xfs_inode.h"
  29#include "xfs_extent_busy.h"
  30#include "xfs_quota.h"
  31#include "xfs_trans.h"
  32#include "xfs_trans_priv.h"
  33#include "xfs_log.h"
  34#include "xfs_trace.h"
  35#include "xfs_error.h"
  36
  37kmem_zone_t	*xfs_trans_zone;
  38kmem_zone_t	*xfs_log_item_desc_zone;
  39
  40/*
  41 * Initialize the precomputed transaction reservation values
  42 * in the mount structure.
  43 */
  44void
  45xfs_trans_init(
  46	struct xfs_mount	*mp)
  47{
  48	xfs_trans_resv_calc(mp, M_RES(mp));
  49}
  50
  51/*
  52 * This routine is called to allocate a transaction structure.
  53 * The type parameter indicates the type of the transaction.  These
  54 * are enumerated in xfs_trans.h.
  55 *
  56 * Dynamically allocate the transaction structure from the transaction
  57 * zone, initialize it, and return it to the caller.
  58 */
  59xfs_trans_t *
  60xfs_trans_alloc(
  61	xfs_mount_t	*mp,
  62	uint		type)
  63{
  64	xfs_trans_t     *tp;
  65
  66	sb_start_intwrite(mp->m_super);
  67	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
  68	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
  69	return tp;
  70}
  71
  72xfs_trans_t *
  73_xfs_trans_alloc(
  74	xfs_mount_t	*mp,
  75	uint		type,
  76	xfs_km_flags_t	memflags)
  77{
  78	xfs_trans_t	*tp;
  79
  80	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
  81	atomic_inc(&mp->m_active_trans);
  82
  83	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
  84	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
  85	tp->t_type = type;
  86	tp->t_mountp = mp;
  87	INIT_LIST_HEAD(&tp->t_items);
  88	INIT_LIST_HEAD(&tp->t_busy);
  89	return tp;
  90}
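For contrast with the combined v4.17 xfs_trans_alloc() above, a hedged sketch of the two-step pattern this older API expects; the transaction type is illustrative, and the two-argument commit/cancel calls of this era live outside this file, so their use here is an assumption about the surrounding kernel:

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* ... join items, make modifications, log them ... */

	return xfs_trans_commit(tp, 0);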
  91
  92/*
  93 * Free the transaction structure.  If there is more clean up
  94 * to do when the structure is freed, add it here.
  95 */
  96STATIC void
  97xfs_trans_free(
  98	struct xfs_trans	*tp)
  99{
 100	xfs_extent_busy_sort(&tp->t_busy);
 101	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
 102
 103	atomic_dec(&tp->t_mountp->m_active_trans);
 104	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
 105		sb_end_intwrite(tp->t_mountp->m_super);
 106	xfs_trans_free_dqinfo(tp);
 107	kmem_zone_free(xfs_trans_zone, tp);
 108}
 109
 110/*
 111 * This is called to create a new transaction which will share the
 112 * permanent log reservation of the given transaction.  The remaining
 113 * unused block and rt extent reservations are also inherited.  This
 114 * implies that the original transaction is no longer allowed to allocate
 115 * blocks.  Locks and log items, however, are not inherited.  They must
 116 * be added to the new transaction explicitly.
 117 */
 118xfs_trans_t *
 119xfs_trans_dup(
 120	xfs_trans_t	*tp)
 121{
 122	xfs_trans_t	*ntp;
 123
 124	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
 125
 126	/*
 127	 * Initialize the new transaction structure.
 128	 */
 129	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
 130	ntp->t_type = tp->t_type;
 131	ntp->t_mountp = tp->t_mountp;
 132	INIT_LIST_HEAD(&ntp->t_items);
 133	INIT_LIST_HEAD(&ntp->t_busy);
 134
 135	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 136	ASSERT(tp->t_ticket != NULL);
 137
 138	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 139		       (tp->t_flags & XFS_TRANS_RESERVE) |
 140		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
 141	/* We gave our writer reference to the new transaction */
 142	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
 143	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 144	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 145	tp->t_blk_res = tp->t_blk_res_used;
 146	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 147	tp->t_rtx_res = tp->t_rtx_res_used;
 148	ntp->t_pflags = tp->t_pflags;
 149
 150	xfs_trans_dup_dqinfo(tp, ntp);
 151
 152	atomic_inc(&tp->t_mountp->m_active_trans);
 153	return ntp;
 154}
 155
 156/*
 157 * This is called to reserve free disk blocks and log space for the
 158 * given transaction.  This must be done before allocating any resources
 159 * within the transaction.
 160 *
 161 * This will return ENOSPC if there are not enough blocks available.
 162 * It will sleep waiting for available log space.
 163 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 164 * is used by long running transactions.  If any one of the reservations
 165 * fails then they will all be backed out.
 166 *
 167 * This does not do quota reservations. That typically is done by the
 168 * caller afterwards.
 169 */
 170int
 171xfs_trans_reserve(
 172	struct xfs_trans	*tp,
 173	struct xfs_trans_res	*resp,
 174	uint			blocks,
 175	uint			rtextents)
 176{
 177	int		error = 0;
 178	int		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 179
 180	/* Mark this thread as being in a transaction */
 181	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
 182
 183	/*
 184	 * Attempt to reserve the needed disk blocks by decrementing
 185	 * the number needed from the number available.  This will
 186	 * fail if the count would go below zero.
 187	 */
 188	if (blocks > 0) {
 189		error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
 190					  -((int64_t)blocks), rsvd);
 191		if (error != 0) {
 192			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 193			return (XFS_ERROR(ENOSPC));
 194		}
 195		tp->t_blk_res += blocks;
 196	}
 197
 198	/*
 199	 * Reserve the log space needed for this transaction.
 200	 */
 201	if (resp->tr_logres > 0) {
 202		bool	permanent = false;
 203
 204		ASSERT(tp->t_log_res == 0 ||
 205		       tp->t_log_res == resp->tr_logres);
 206		ASSERT(tp->t_log_count == 0 ||
 207		       tp->t_log_count == resp->tr_logcount);
 208
 209		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 210			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 211			permanent = true;
 212		} else {
 213			ASSERT(tp->t_ticket == NULL);
 214			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 215		}
 216
 217		if (tp->t_ticket != NULL) {
 218			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 219			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
 220		} else {
 221			error = xfs_log_reserve(tp->t_mountp,
 222						resp->tr_logres,
 223						resp->tr_logcount,
 224						&tp->t_ticket, XFS_TRANSACTION,
 225						permanent, tp->t_type);
 226		}
 227
 228		if (error)
 229			goto undo_blocks;
 230
 231		tp->t_log_res = resp->tr_logres;
 232		tp->t_log_count = resp->tr_logcount;
 233	}
 234
 235	/*
 236	 * Attempt to reserve the needed realtime extents by decrementing
 237	 * the number needed from the number available.  This will
 238	 * fail if the count would go below zero.
 239	 */
 240	if (rtextents > 0) {
 241		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
 242					  -((int64_t)rtextents), rsvd);
 243		if (error) {
 244			error = XFS_ERROR(ENOSPC);
 245			goto undo_log;
 246		}
 247		tp->t_rtx_res += rtextents;
 248	}
 249
 250	return 0;
 251
 252	/*
 253	 * Error cases jump to one of these labels to undo any
 254	 * reservations which have already been performed.
 255	 */
 256undo_log:
 257	if (resp->tr_logres > 0) {
 258		int		log_flags;
 259
 260		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 261			log_flags = XFS_LOG_REL_PERM_RESERV;
 262		} else {
 263			log_flags = 0;
 264		}
 265		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
 266		tp->t_ticket = NULL;
 267		tp->t_log_res = 0;
 268		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 269	}
 270
 271undo_blocks:
 272	if (blocks > 0) {
 273		xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
 274					 (int64_t)blocks, rsvd);
 275		tp->t_blk_res = 0;
 276	}
 277
 278	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 279
 280	return error;
 281}
 282
 283/*
 284 * Record the indicated change to the given field for application
 285 * to the file system's superblock when the transaction commits.
 286 * For now, just store the change in the transaction structure.
 287 *
 288 * Mark the transaction structure to indicate that the superblock
 289 * needs to be updated before committing.
 290 *
 291 * Because we may not be keeping track of allocated/free inodes and
 292 * used filesystem blocks in the superblock, we do not mark the
 293 * superblock dirty in this transaction if we modify these fields.
 294 * We still need to update the transaction deltas so that they get
 295 * applied to the incore superblock, but we don't want them to
 296 * cause the superblock to get locked and logged if these are the
 297 * only fields in the superblock that the transaction modifies.
 298 */
 299void
 300xfs_trans_mod_sb(
 301	xfs_trans_t	*tp,
 302	uint		field,
 303	int64_t		delta)
 304{
 305	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 306	xfs_mount_t	*mp = tp->t_mountp;
 307
 308	switch (field) {
 309	case XFS_TRANS_SB_ICOUNT:
 310		tp->t_icount_delta += delta;
 311		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 312			flags &= ~XFS_TRANS_SB_DIRTY;
 313		break;
 314	case XFS_TRANS_SB_IFREE:
 315		tp->t_ifree_delta += delta;
 316		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 317			flags &= ~XFS_TRANS_SB_DIRTY;
 318		break;
 319	case XFS_TRANS_SB_FDBLOCKS:
 320		/*
 321		 * Track the number of blocks allocated in the
 322		 * transaction.  Make sure it does not exceed the
 323		 * number reserved.
 324		 */
 325		if (delta < 0) {
 326			tp->t_blk_res_used += (uint)-delta;
 327			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
 328		}
 329		tp->t_fdblocks_delta += delta;
 330		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 331			flags &= ~XFS_TRANS_SB_DIRTY;
 332		break;
 333	case XFS_TRANS_SB_RES_FDBLOCKS:
 334		/*
 335		 * The allocation has already been applied to the
 336		 * in-core superblock's counter.  This should only
 337		 * be applied to the on-disk superblock.
 338		 */
 339		ASSERT(delta < 0);
 340		tp->t_res_fdblocks_delta += delta;
 341		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 342			flags &= ~XFS_TRANS_SB_DIRTY;
 343		break;
 344	case XFS_TRANS_SB_FREXTENTS:
 345		/*
 346		 * Track the number of blocks allocated in the
 347		 * transaction.  Make sure it does not exceed the
 348		 * number reserved.
 349		 */
 350		if (delta < 0) {
 351			tp->t_rtx_res_used += (uint)-delta;
 352			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 353		}
 354		tp->t_frextents_delta += delta;
 355		break;
 356	case XFS_TRANS_SB_RES_FREXTENTS:
 357		/*
 358		 * The allocation has already been applied to the
 359		 * in-core superblock's counter.  This should only
 360		 * be applied to the on-disk superblock.
 361		 */
 362		ASSERT(delta < 0);
 363		tp->t_res_frextents_delta += delta;
 364		break;
 365	case XFS_TRANS_SB_DBLOCKS:
 366		ASSERT(delta > 0);
 367		tp->t_dblocks_delta += delta;
 368		break;
 369	case XFS_TRANS_SB_AGCOUNT:
 370		ASSERT(delta > 0);
 371		tp->t_agcount_delta += delta;
 372		break;
 373	case XFS_TRANS_SB_IMAXPCT:
 374		tp->t_imaxpct_delta += delta;
 375		break;
 376	case XFS_TRANS_SB_REXTSIZE:
 377		tp->t_rextsize_delta += delta;
 378		break;
 379	case XFS_TRANS_SB_RBMBLOCKS:
 380		tp->t_rbmblocks_delta += delta;
 381		break;
 382	case XFS_TRANS_SB_RBLOCKS:
 383		tp->t_rblocks_delta += delta;
 384		break;
 385	case XFS_TRANS_SB_REXTENTS:
 386		tp->t_rextents_delta += delta;
 387		break;
 388	case XFS_TRANS_SB_REXTSLOG:
 389		tp->t_rextslog_delta += delta;
 390		break;
 391	default:
 392		ASSERT(0);
 393		return;
 394	}
 395
 396	tp->t_flags |= flags;
 397}
 398
 399/*
 400 * xfs_trans_apply_sb_deltas() is called from the commit code
 401 * to bring the superblock buffer into the current transaction
 402 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 403 *
 404 * For now we just look at each field allowed to change and change
 405 * it if necessary.
 406 */
 407STATIC void
 408xfs_trans_apply_sb_deltas(
 409	xfs_trans_t	*tp)
 410{
 411	xfs_dsb_t	*sbp;
 412	xfs_buf_t	*bp;
 413	int		whole = 0;
 414
 415	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
 416	sbp = XFS_BUF_TO_SBP(bp);
 417
 418	/*
 419	 * Check that superblock mods match the mods made to AGF counters.
 420	 */
 421	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 422	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 423		tp->t_ag_btree_delta));
 424
 425	/*
 426	 * Only update the superblock counters if we are logging them
 427	 */
 428	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 429		if (tp->t_icount_delta)
 430			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 431		if (tp->t_ifree_delta)
 432			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 433		if (tp->t_fdblocks_delta)
 434			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 435		if (tp->t_res_fdblocks_delta)
 436			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 437	}
 438
 439	if (tp->t_frextents_delta)
 440		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 441	if (tp->t_res_frextents_delta)
 442		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 443
 444	if (tp->t_dblocks_delta) {
 445		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 446		whole = 1;
 447	}
 448	if (tp->t_agcount_delta) {
 449		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 450		whole = 1;
 451	}
 452	if (tp->t_imaxpct_delta) {
 453		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 454		whole = 1;
 455	}
 456	if (tp->t_rextsize_delta) {
 457		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 458		whole = 1;
 459	}
 460	if (tp->t_rbmblocks_delta) {
 461		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 462		whole = 1;
 463	}
 464	if (tp->t_rblocks_delta) {
 465		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 466		whole = 1;
 467	}
 468	if (tp->t_rextents_delta) {
 469		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 470		whole = 1;
 471	}
 472	if (tp->t_rextslog_delta) {
 473		sbp->sb_rextslog += tp->t_rextslog_delta;
 474		whole = 1;
 475	}
 476
 477	if (whole)
 478		/*
 479		 * Log the whole thing, the fields are noncontiguous.
 480		 */
 481		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 482	else
 483		/*
 484		 * Since all the modifiable fields are contiguous, we
 485		 * can get away with this.
 486		 */
 487		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 488				  offsetof(xfs_dsb_t, sb_frextents) +
 489				  sizeof(sbp->sb_frextents) - 1);
 490}
 491
 492/*
 493 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 494 * and apply superblock counter changes to the in-core superblock.  The
 495 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 496 * applied to the in-core superblock.  The idea is that that has already been
 497 * done.
 498 *
 499 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 500 * However, we have to ensure that we only modify each superblock field only
 501 * once because the application of the delta values may not be atomic. That can
 502 * lead to ENOSPC races occurring if we have two separate modifications of the
 503 * free space counter to put back the entire reservation and then take away
 504 * what we used.
 505 *
 506 * If we are not logging superblock counters, then the inode allocated/free and
 507 * used block counts are not updated in the on disk superblock. In this case,
 508 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 509 * still need to update the incore superblock with the changes.
 510 */
 511void
 512xfs_trans_unreserve_and_mod_sb(
 513	xfs_trans_t	*tp)
 514{
 515	xfs_mod_sb_t	msb[9];	/* If you add cases, add entries */
 516	xfs_mod_sb_t	*msbp;
 517	xfs_mount_t	*mp = tp->t_mountp;
 518	/* REFERENCED */
 519	int		error;
 520	int		rsvd;
 521	int64_t		blkdelta = 0;
 522	int64_t		rtxdelta = 0;
 523	int64_t		idelta = 0;
 524	int64_t		ifreedelta = 0;
 525
 526	msbp = msb;
 527	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 528
 529	/* calculate deltas */
 530	if (tp->t_blk_res > 0)
 531		blkdelta = tp->t_blk_res;
 532	if ((tp->t_fdblocks_delta != 0) &&
 533	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 534	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 535	        blkdelta += tp->t_fdblocks_delta;
 536
 537	if (tp->t_rtx_res > 0)
 538		rtxdelta = tp->t_rtx_res;
 539	if ((tp->t_frextents_delta != 0) &&
 540	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 541		rtxdelta += tp->t_frextents_delta;
 542
 543	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 544	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 545		idelta = tp->t_icount_delta;
 546		ifreedelta = tp->t_ifree_delta;
 547	}
 548
 549	/* apply the per-cpu counters */
 550	if (blkdelta) {
 551		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
 552						 blkdelta, rsvd);
 553		if (error)
 554			goto out;
 555	}
 556
 557	if (idelta) {
 558		error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
 559						 idelta, rsvd);
 560		if (error)
 561			goto out_undo_fdblocks;
 562	}
 563
 564	if (ifreedelta) {
 565		error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
 566						 ifreedelta, rsvd);
 567		if (error)
 568			goto out_undo_icount;
 569	}
 570
 571	/* apply remaining deltas */
 572	if (rtxdelta != 0) {
 573		msbp->msb_field = XFS_SBS_FREXTENTS;
 574		msbp->msb_delta = rtxdelta;
 575		msbp++;
 576	}
 577
 578	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
 579		if (tp->t_dblocks_delta != 0) {
 580			msbp->msb_field = XFS_SBS_DBLOCKS;
 581			msbp->msb_delta = tp->t_dblocks_delta;
 582			msbp++;
 583		}
 584		if (tp->t_agcount_delta != 0) {
 585			msbp->msb_field = XFS_SBS_AGCOUNT;
 586			msbp->msb_delta = tp->t_agcount_delta;
 587			msbp++;
 588		}
 589		if (tp->t_imaxpct_delta != 0) {
 590			msbp->msb_field = XFS_SBS_IMAX_PCT;
 591			msbp->msb_delta = tp->t_imaxpct_delta;
 592			msbp++;
 593		}
 594		if (tp->t_rextsize_delta != 0) {
 595			msbp->msb_field = XFS_SBS_REXTSIZE;
 596			msbp->msb_delta = tp->t_rextsize_delta;
 597			msbp++;
 598		}
 599		if (tp->t_rbmblocks_delta != 0) {
 600			msbp->msb_field = XFS_SBS_RBMBLOCKS;
 601			msbp->msb_delta = tp->t_rbmblocks_delta;
 602			msbp++;
 603		}
 604		if (tp->t_rblocks_delta != 0) {
 605			msbp->msb_field = XFS_SBS_RBLOCKS;
 606			msbp->msb_delta = tp->t_rblocks_delta;
 607			msbp++;
 608		}
 609		if (tp->t_rextents_delta != 0) {
 610			msbp->msb_field = XFS_SBS_REXTENTS;
 611			msbp->msb_delta = tp->t_rextents_delta;
 612			msbp++;
 613		}
 614		if (tp->t_rextslog_delta != 0) {
 615			msbp->msb_field = XFS_SBS_REXTSLOG;
 616			msbp->msb_delta = tp->t_rextslog_delta;
 617			msbp++;
 618		}
 619	}
 620
 621	/*
 622	 * If we need to change anything, do it.
 623	 */
 624	if (msbp > msb) {
 625		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
 626			(uint)(msbp - msb), rsvd);
 627		if (error)
 628			goto out_undo_ifreecount;
 629	}
 630
 631	return;
 632
 633out_undo_ifreecount:
 634	if (ifreedelta)
 635		xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
 636out_undo_icount:
 637	if (idelta)
 638		xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
 639out_undo_fdblocks:
 640	if (blkdelta)
 641		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
 642out:
 643	ASSERT(error == 0);
 644	return;
 645}
 646
 647/*
 648 * Add the given log item to the transaction's list of log items.
 649 *
 650 * The log item will now point to its new descriptor with its li_desc field.
 651 */
 652void
 653xfs_trans_add_item(
 654	struct xfs_trans	*tp,
 655	struct xfs_log_item	*lip)
 656{
 657	struct xfs_log_item_desc *lidp;
 658
 659	ASSERT(lip->li_mountp == tp->t_mountp);
 660	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 661
 662	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
 663
 664	lidp->lid_item = lip;
 665	lidp->lid_flags = 0;
 666	list_add_tail(&lidp->lid_trans, &tp->t_items);
 667
 668	lip->li_desc = lidp;
 669}
 670
 671STATIC void
 672xfs_trans_free_item_desc(
 673	struct xfs_log_item_desc *lidp)
 674{
 675	list_del_init(&lidp->lid_trans);
 676	kmem_zone_free(xfs_log_item_desc_zone, lidp);
 677}
 678
 679/*
 680 * Unlink and free the given descriptor.
 681 */
 682void
 683xfs_trans_del_item(
 684	struct xfs_log_item	*lip)
 685{
 686	xfs_trans_free_item_desc(lip->li_desc);
 687	lip->li_desc = NULL;
 688}
 689
 690/*
 691 * Unlock all of the items of a transaction and free all the descriptors
 692 * of that transaction.
 693 */
 694void
 695xfs_trans_free_items(
 696	struct xfs_trans	*tp,
 697	xfs_lsn_t		commit_lsn,
 698	int			flags)
 699{
 700	struct xfs_log_item_desc *lidp, *next;
 701
 702	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
 703		struct xfs_log_item	*lip = lidp->lid_item;
 704
 705		lip->li_desc = NULL;
 706
 707		if (commit_lsn != NULLCOMMITLSN)
 708			lip->li_ops->iop_committing(lip, commit_lsn);
 709		if (flags & XFS_TRANS_ABORT)
 710			lip->li_flags |= XFS_LI_ABORTED;
 711		lip->li_ops->iop_unlock(lip);
 712
 713		xfs_trans_free_item_desc(lidp);
 714	}
 715}
 716
 717static inline void
 718xfs_log_item_batch_insert(
 719	struct xfs_ail		*ailp,
 720	struct xfs_ail_cursor	*cur,
 721	struct xfs_log_item	**log_items,
 722	int			nr_items,
 723	xfs_lsn_t		commit_lsn)
 724{
 725	int	i;
 726
 727	spin_lock(&ailp->xa_lock);
 728	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
 729	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 730
 731	for (i = 0; i < nr_items; i++) {
 732		struct xfs_log_item *lip = log_items[i];
 733
 734		lip->li_ops->iop_unpin(lip, 0);
 735	}
 736}
 737
 738/*
 739 * Bulk operation version of xfs_trans_committed that takes a log vector of
 740 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 741 * minimise lock traffic.
 742 *
 743 * If we are called with the aborted flag set, it is because a log write during
 744 * a CIL checkpoint commit has failed. In this case, all the items in the
  745 * checkpoint have already gone through iop_committed and iop_unlock, which
 746 * means that checkpoint commit abort handling is treated exactly the same
 747 * as an iclog write error even though we haven't started any IO yet. Hence in
 748 * this case all we need to do is iop_committed processing, followed by an
 749 * iop_unpin(aborted) call.
 750 *
 751 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 752 * at the end of the AIL, the insert cursor avoids the need to walk
 753 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 754 * call. This saves a lot of needless list walking and is a net win, even
  755 * though it slightly increases the amount of AIL lock traffic to set it up
 756 * and tear it down.
 757 */
 758void
 759xfs_trans_committed_bulk(
 760	struct xfs_ail		*ailp,
 761	struct xfs_log_vec	*log_vector,
 762	xfs_lsn_t		commit_lsn,
 763	int			aborted)
 764{
 765#define LOG_ITEM_BATCH_SIZE	32
 766	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 767	struct xfs_log_vec	*lv;
 768	struct xfs_ail_cursor	cur;
 769	int			i = 0;
 770
 771	spin_lock(&ailp->xa_lock);
 772	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 773	spin_unlock(&ailp->xa_lock);
 774
 775	/* unpin all the log items */
  776	for (lv = log_vector; lv; lv = lv->lv_next) {
 777		struct xfs_log_item	*lip = lv->lv_item;
 778		xfs_lsn_t		item_lsn;
 779
 780		if (aborted)
 781			lip->li_flags |= XFS_LI_ABORTED;
 782		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 783
 784		/* item_lsn of -1 means the item needs no further processing */
 785		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 786			continue;
 787
 788		/*
 789		 * if we are aborting the operation, no point in inserting the
 790		 * object into the AIL as we are in a shutdown situation.
 791		 */
 792		if (aborted) {
 793			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
 794			lip->li_ops->iop_unpin(lip, 1);
 795			continue;
 796		}
 797
 798		if (item_lsn != commit_lsn) {
 799
 800			/*
  801			 * Not a bulk update candidate due to an unusual item_lsn.
 802			 * Push into AIL immediately, rechecking the lsn once
 803			 * we have the ail lock. Then unpin the item. This does
 804			 * not affect the AIL cursor the bulk insert path is
 805			 * using.
 806			 */
 807			spin_lock(&ailp->xa_lock);
 808			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 809				xfs_trans_ail_update(ailp, lip, item_lsn);
 810			else
 811				spin_unlock(&ailp->xa_lock);
 812			lip->li_ops->iop_unpin(lip, 0);
 813			continue;
 814		}
 815
 816		/* Item is a candidate for bulk AIL insert.  */
 817		log_items[i++] = lv->lv_item;
 818		if (i >= LOG_ITEM_BATCH_SIZE) {
 819			xfs_log_item_batch_insert(ailp, &cur, log_items,
 820					LOG_ITEM_BATCH_SIZE, commit_lsn);
 821			i = 0;
 822		}
 823	}
 824
 825	/* make sure we insert the remainder! */
 826	if (i)
 827		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 828
 829	spin_lock(&ailp->xa_lock);
 830	xfs_trans_ail_cursor_done(ailp, &cur);
 831	spin_unlock(&ailp->xa_lock);
 832}
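/*
 * Illustrative sketch, not part of this file: the accumulate-and-flush
 * batching pattern used above, shown in isolation.  "process_batch" is a
 * made-up stand-in for xfs_log_item_batch_insert(); the point is that a
 * full batch is flushed inside the loop and any partial batch must be
 * flushed again after the loop ends.
 */
#if 0	/* example only */
static void
example_batched_walk(
	struct xfs_log_item	**items,
	int			nr,
	void			(*process_batch)(struct xfs_log_item **, int))
{
	struct xfs_log_item	*batch[LOG_ITEM_BATCH_SIZE];
	int			i = 0;
	int			j;

	for (j = 0; j < nr; j++) {
		batch[i++] = items[j];
		if (i >= LOG_ITEM_BATCH_SIZE) {
			process_batch(batch, i);
			i = 0;
		}
	}
	/* make sure we insert the remainder! */
	if (i)
		process_batch(batch, i);
}
#endif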
 833
 834/*
 835 * Commit the given transaction to the log.
 836 *
  837 * The XFS disk error handling mechanism is not based on a typical
  838 * transaction abort mechanism. Logically, after the filesystem
  839 * gets marked 'SHUTDOWN', we can't let any new transactions
 840 * be durable - ie. committed to disk - because some metadata might
 841 * be inconsistent. In such cases, this returns an error, and the
 842 * caller may assume that all locked objects joined to the transaction
 843 * have already been unlocked as if the commit had succeeded.
 844 * Do not reference the transaction structure after this call.
 845 */
 846int
 847xfs_trans_commit(
 848	struct xfs_trans	*tp,
 849	uint			flags)
 850{
 851	struct xfs_mount	*mp = tp->t_mountp;
 852	xfs_lsn_t		commit_lsn = -1;
 853	int			error = 0;
 854	int			log_flags = 0;
 855	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 856
 857	/*
 858	 * Determine whether this commit is releasing a permanent
 859	 * log reservation or not.
 860	 */
 861	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
 862		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 863		log_flags = XFS_LOG_REL_PERM_RESERV;
 864	}
 865
 866	/*
 867	 * If there is nothing to be logged by the transaction,
 868	 * then unlock all of the items associated with the
 869	 * transaction and free the transaction structure.
 870	 * Also make sure to return any reserved blocks to
 871	 * the free pool.
 872	 */
 873	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 874		goto out_unreserve;
 875
 876	if (XFS_FORCED_SHUTDOWN(mp)) {
 877		error = XFS_ERROR(EIO);
 878		goto out_unreserve;
 879	}
 880
 881	ASSERT(tp->t_ticket != NULL);
 882
 883	/*
 884	 * If we need to update the superblock, then do it now.
 885	 */
 886	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
 887		xfs_trans_apply_sb_deltas(tp);
 888	xfs_trans_apply_dquot_deltas(tp);
 889
 890	xfs_log_commit_cil(mp, tp, &commit_lsn, flags);
 891
 892	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 893	xfs_trans_free(tp);
 894
 895	/*
 896	 * If the transaction needs to be synchronous, then force the
 897	 * log out now and wait for it.
 898	 */
 899	if (sync) {
 900		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 901		XFS_STATS_INC(xs_trans_sync);
 902	} else {
 903		XFS_STATS_INC(xs_trans_async);
 904	}
 905
 906	return error;
 907
 908out_unreserve:
 909	xfs_trans_unreserve_and_mod_sb(tp);
 910
 911	/*
 912	 * It is indeed possible for the transaction to be not dirty but
 913	 * the dqinfo portion to be.  All that means is that we have some
 914	 * (non-persistent) quota reservations that need to be unreserved.
 915	 */
 916	xfs_trans_unreserve_and_mod_dquots(tp);
 917	if (tp->t_ticket) {
 918		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
 919		if (commit_lsn == -1 && !error)
 920			error = XFS_ERROR(EIO);
 921	}
 922	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 923	xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0);
 924	xfs_trans_free(tp);
 925
 926	XFS_STATS_INC(xs_trans_empty);
 927	return error;
 928}
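/*
 * Illustrative sketch, not part of this file: a typical caller of the
 * commit path described above, modelled on the setattr-style users of
 * this interface.  The transaction type, reservation and locking details
 * are simplified assumptions; real callers pick a reservation from
 * M_RES(mp) that matches the modification they are making.
 */
#if 0	/* example only */
STATIC int
example_modify_inode(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		/* nothing is dirty yet, so a plain cancel is enough */
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/* ...modify the inode core here... */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* commit unlocks the joined inode and frees tp, even on error */
	return xfs_trans_commit(tp, 0);
}
#endif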
 929
 
 
 
 
 
 
 
 930/*
 931 * Unlock all of the transaction's items and free the transaction.
 932 * The transaction must not have modified any of its items, because
 933 * there is no way to restore them to their previous state.
 934 *
 935 * If the transaction has made a log reservation, make sure to release
 936 * it as well.
 937 */
 938void
 939xfs_trans_cancel(
 940	xfs_trans_t		*tp,
 941	int			flags)
 942{
 943	int			log_flags;
 944	xfs_mount_t		*mp = tp->t_mountp;
 945
 946	/*
 947	 * See if the caller is being too lazy to figure out if
 948	 * the transaction really needs an abort.
 949	 */
 950	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
 951		flags &= ~XFS_TRANS_ABORT;
 952	/*
 953	 * See if the caller is relying on us to shut down the
 954	 * filesystem.  This happens in paths where we detect
 955	 * corruption and decide to give up.
 956	 */
 957	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
 958		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
 959		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 960	}
 961#ifdef DEBUG
 962	if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
 963		struct xfs_log_item_desc *lidp;
 964
 965		list_for_each_entry(lidp, &tp->t_items, lid_trans)
 966			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
 967	}
 968#endif
 969	xfs_trans_unreserve_and_mod_sb(tp);
 970	xfs_trans_unreserve_and_mod_dquots(tp);
 971
 972	if (tp->t_ticket) {
 973		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
 974			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 975			log_flags = XFS_LOG_REL_PERM_RESERV;
 976		} else {
 977			log_flags = 0;
 978		}
 979		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
 980	}
 981
 982	/* mark this thread as no longer being in a transaction */
 983	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 984
 985	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
 986	xfs_trans_free(tp);
 987}
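/*
 * Illustrative sketch, not part of this file: the error-path pattern
 * implied by the cancel semantics above.  tp is assumed to hold a
 * permanent log reservation; once it has dirtied items it must be
 * cancelled with both XFS_TRANS_ABORT and XFS_TRANS_RELEASE_LOG_RES,
 * which (per the shutdown check above) also takes the filesystem down.
 * "xfs_example_step" is a made-up helper standing in for real work.
 */
#if 0	/* example only */
STATIC int
example_dirty_error_path(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	int	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	int	error;

	error = xfs_example_step(tp, ip);	/* dirties the transaction */
	if (error)
		goto out_cancel;

	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

out_cancel:
	cancel_flags |= XFS_TRANS_ABORT;
	xfs_trans_cancel(tp, cancel_flags);
	return error;
}
#endif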
 988
 989/*
 990 * Roll from one trans in the sequence of PERMANENT transactions to
 991 * the next: permanent transactions are only flushed out when
  992 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want chunks
  993 * of it to reach the log as soon as possible. So we commit the
 994 * chunk we've been working on and get a new transaction to continue.
 995 */
 996int
 997xfs_trans_roll(
 998	struct xfs_trans	**tpp,
 999	struct xfs_inode	*dp)
1000{
1001	struct xfs_trans	*trans;
1002	struct xfs_trans_res	tres;
1003	int			error;
1004
1005	/*
1006	 * Ensure that the inode is always logged.
1007	 */
1008	trans = *tpp;
1009	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
1010
1011	/*
1012	 * Copy the critical parameters from one trans to the next.
1013	 */
1014	tres.tr_logres = trans->t_log_res;
 1015	tres.tr_logcount = trans->t_log_count;
1016	*tpp = xfs_trans_dup(trans);
1017
1018	/*
1019	 * Commit the current transaction.
1020	 * If this commit failed, then it'd just unlock those items that
1021	 * are not marked ihold. That also means that a filesystem shutdown
1022	 * is in progress. The caller takes the responsibility to cancel
1023	 * the duplicate transaction that gets returned.
1024	 */
1025	error = xfs_trans_commit(trans, 0);
1026	if (error)
1027		return (error);
1028
1029	trans = *tpp;
1030
1031	/*
1032	 * transaction commit worked ok so we can drop the extra ticket
1033	 * reference that we gained in xfs_trans_dup()
1034	 */
1035	xfs_log_ticket_put(trans->t_ticket);
1036
1037
1038	/*
 1039	 * Reserve space in the log for the next transaction.
1040	 * This also pushes items in the "AIL", the list of logged items,
1041	 * out to disk if they are taking up space at the tail of the log
1042	 * that we want to use.  This requires that either nothing be locked
1043	 * across this call, or that anything that is locked be logged in
1044	 * the prior and the next transactions.
1045	 */
1046	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1047	error = xfs_trans_reserve(trans, &tres, 0, 0);
1048	/*
1049	 *  Ensure that the inode is in the new transaction and locked.
1050	 */
1051	if (error)
1052		return error;
1053
1054	xfs_trans_ijoin(trans, dp, 0);
1055	return 0;
1056}
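/*
 * Illustrative sketch, not part of this file: how a long-running
 * operation typically uses the rolling pattern above, committing each
 * chunk of work and continuing in a fresh transaction while keeping the
 * inode joined and locked.  "xfs_example_one_chunk" is a made-up helper
 * standing in for one unit of work (e.g. freeing one extent).
 */
#if 0	/* example only */
STATIC int
example_roll_loop(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			nr_chunks)
{
	int	error;

	while (nr_chunks--) {
		error = xfs_example_one_chunk(*tpp, ip);
		if (error)
			return error;

		/* commit what we have so far and get a new transaction */
		error = xfs_trans_roll(tpp, ip);
		if (error)
			return error;
	}
	return 0;
}
#endif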