v4.17
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"

kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif
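
/*
 * For reference, the per-reservation values traced above can be observed
 * at runtime through tracefs.  A sketch, assuming the usual tracefs mount
 * point under /sys/kernel/debug/tracing; the event name comes from the
 * trace_xfs_trans_resv_calc() tracepoint defined in xfs_trace.h:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/events/xfs/xfs_trans_resv_calc/enable
 *	# mount /dev/sdb1 /mnt      <- reservations are computed at mount time
 *	# cat /sys/kernel/debug/tracing/trace
 */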

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
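
/*
 * For illustration, a typical caller pairs xfs_trans_alloc() with
 * xfs_trans_commit() or xfs_trans_cancel().  A minimal sketch -- the
 * function name and the choice of the tr_ichange reservation are
 * illustrative, not code from this file:
 *
 *	static int example_touch_inode(struct xfs_inode *ip)
 *	{
 *		struct xfs_mount	*mp = ip->i_mount;
 *		struct xfs_trans	*tp;
 *		int			error;
 *
 *		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange,
 *					0, 0, 0, &tp);
 *		if (error)
 *			return error;
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *		return xfs_trans_commit(tp);
 *	}
 */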

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount		*mp,
	struct xfs_trans		**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
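
/*
 * Usage sketch for the empty transaction above (illustrative only):
 * allocate, walk the metadata of interest, then cancel -- never commit,
 * since nothing may be dirtied:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	... read-only metadata queries against tp ...
 *	xfs_trans_cancel(tp);
 */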

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
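
/*
 * A hedged example of how this is driven: when the allocator hands out
 * N filesystem blocks inside a transaction, it accounts for them with a
 * negative delta (sketch; the surrounding caller is illustrative):
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)N);
 *
 * and a transaction that frees blocks passes a positive delta back.
 */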

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock; the assumption is that this has already
 * been done.
 *
 * If we are not logging superblock counters, then the inode allocated/free
 * and used block counts are not updated in the on-disk superblock. In this
 * case, XFS_TRANS_SB_DIRTY will not be set when the transaction is updated
 * but we still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				     tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will point to its new descriptor via its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
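
/*
 * Callers don't usually invoke this directly; each log item type has a
 * "join" helper that does.  For instance, xfs_trans_ijoin() attaches an
 * inode's log item roughly like this (a sketch of the relevant lines):
 *
 *	struct xfs_inode_log_item *iip = ip->i_itemp;
 *	...
 *	xfs_trans_add_item(tp, &iip->ili_item);
 */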

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}

/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	bool			abort)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic needed to set
 * it up and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically, after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket)
		xfs_log_done(mp, tp->t_ticket, NULL, false);

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}
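
/*
 * Error-path sketch (illustrative only; some_modification() is a
 * hypothetical helper): once items have been joined, a failure is
 * unwound by cancelling, which unlocks the joined items and releases
 * the log and block reservations:
 *
 *	error = some_modification(tp, ip);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 */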

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the chunk
 * we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
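
/*
 * Illustrative use of transaction rolling (a sketch, not code from this
 * file): a long-running operation commits what it has done so far and
 * keeps going under a fresh transaction, re-joining its locked inode
 * each time; the loop condition is hypothetical:
 *
 *	while (more_work_to_do) {
 *		... dirty items joined to tp ...
 *		error = xfs_trans_roll(&tp);
 *		if (error)
 *			break;
 *		xfs_trans_ijoin(tp, ip, 0);
 *	}
 */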
v4.10.11
   1/*
   2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   3 * Copyright (C) 2010 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_mount.h"
  26#include "xfs_inode.h"
  27#include "xfs_extent_busy.h"
  28#include "xfs_quota.h"
  29#include "xfs_trans.h"
  30#include "xfs_trans_priv.h"
  31#include "xfs_log.h"
  32#include "xfs_trace.h"
  33#include "xfs_error.h"
  34
  35kmem_zone_t	*xfs_trans_zone;
  36kmem_zone_t	*xfs_log_item_desc_zone;
  37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  38/*
  39 * Initialize the precomputed transaction reservation values
  40 * in the mount structure.
  41 */
  42void
  43xfs_trans_init(
  44	struct xfs_mount	*mp)
  45{
  46	xfs_trans_resv_calc(mp, M_RES(mp));
 
  47}
  48
  49/*
  50 * Free the transaction structure.  If there is more clean up
  51 * to do when the structure is freed, add it here.
  52 */
  53STATIC void
  54xfs_trans_free(
  55	struct xfs_trans	*tp)
  56{
  57	xfs_extent_busy_sort(&tp->t_busy);
  58	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
  59
  60	atomic_dec(&tp->t_mountp->m_active_trans);
  61	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
  62		sb_end_intwrite(tp->t_mountp->m_super);
  63	xfs_trans_free_dqinfo(tp);
  64	kmem_zone_free(xfs_trans_zone, tp);
  65}
  66
  67/*
  68 * This is called to create a new transaction which will share the
  69 * permanent log reservation of the given transaction.  The remaining
  70 * unused block and rt extent reservations are also inherited.  This
  71 * implies that the original transaction is no longer allowed to allocate
  72 * blocks.  Locks and log items, however, are no inherited.  They must
  73 * be added to the new transaction explicitly.
  74 */
  75STATIC xfs_trans_t *
  76xfs_trans_dup(
  77	xfs_trans_t	*tp)
  78{
  79	xfs_trans_t	*ntp;
  80
  81	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
  82
  83	/*
  84	 * Initialize the new transaction structure.
  85	 */
  86	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
  87	ntp->t_mountp = tp->t_mountp;
  88	INIT_LIST_HEAD(&ntp->t_items);
  89	INIT_LIST_HEAD(&ntp->t_busy);
  90
  91	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
  92	ASSERT(tp->t_ticket != NULL);
  93
  94	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
  95		       (tp->t_flags & XFS_TRANS_RESERVE) |
  96		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
  97	/* We gave our writer reference to the new transaction */
  98	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
  99	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 
 
 100	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 101	tp->t_blk_res = tp->t_blk_res_used;
 
 102	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 103	tp->t_rtx_res = tp->t_rtx_res_used;
 104	ntp->t_pflags = tp->t_pflags;
 105
 106	xfs_trans_dup_dqinfo(tp, ntp);
 107
 108	atomic_inc(&tp->t_mountp->m_active_trans);
 109	return ntp;
 110}
 111
 112/*
 113 * This is called to reserve free disk blocks and log space for the
 114 * given transaction.  This must be done before allocating any resources
 115 * within the transaction.
 116 *
 117 * This will return ENOSPC if there are not enough blocks available.
 118 * It will sleep waiting for available log space.
 119 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 120 * is used by long running transactions.  If any one of the reservations
 121 * fails then they will all be backed out.
 122 *
 123 * This does not do quota reservations. That typically is done by the
 124 * caller afterwards.
 125 */
 126static int
 127xfs_trans_reserve(
 128	struct xfs_trans	*tp,
 129	struct xfs_trans_res	*resp,
 130	uint			blocks,
 131	uint			rtextents)
 132{
 133	int		error = 0;
 134	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 135
 136	/* Mark this thread as being in a transaction */
 137	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
 138
 139	/*
 140	 * Attempt to reserve the needed disk blocks by decrementing
 141	 * the number needed from the number available.  This will
 142	 * fail if the count would go below zero.
 143	 */
 144	if (blocks > 0) {
 145		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 146		if (error != 0) {
 147			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 148			return -ENOSPC;
 149		}
 150		tp->t_blk_res += blocks;
 151	}
 152
 153	/*
 154	 * Reserve the log space needed for this transaction.
 155	 */
 156	if (resp->tr_logres > 0) {
 157		bool	permanent = false;
 158
 159		ASSERT(tp->t_log_res == 0 ||
 160		       tp->t_log_res == resp->tr_logres);
 161		ASSERT(tp->t_log_count == 0 ||
 162		       tp->t_log_count == resp->tr_logcount);
 163
 164		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 165			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 166			permanent = true;
 167		} else {
 168			ASSERT(tp->t_ticket == NULL);
 169			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 170		}
 171
 172		if (tp->t_ticket != NULL) {
 173			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 174			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
 175		} else {
 176			error = xfs_log_reserve(tp->t_mountp,
 177						resp->tr_logres,
 178						resp->tr_logcount,
 179						&tp->t_ticket, XFS_TRANSACTION,
 180						permanent);
 181		}
 182
 183		if (error)
 184			goto undo_blocks;
 185
 186		tp->t_log_res = resp->tr_logres;
 187		tp->t_log_count = resp->tr_logcount;
 188	}
 189
 190	/*
 191	 * Attempt to reserve the needed realtime extents by decrementing
 192	 * the number needed from the number available.  This will
 193	 * fail if the count would go below zero.
 194	 */
 195	if (rtextents > 0) {
 196		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 197		if (error) {
 198			error = -ENOSPC;
 199			goto undo_log;
 200		}
 201		tp->t_rtx_res += rtextents;
 202	}
 203
 204	return 0;
 205
 206	/*
 207	 * Error cases jump to one of these labels to undo any
 208	 * reservations which have already been performed.
 209	 */
 210undo_log:
 211	if (resp->tr_logres > 0) {
 212		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
 213		tp->t_ticket = NULL;
 214		tp->t_log_res = 0;
 215		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 216	}
 217
 218undo_blocks:
 219	if (blocks > 0) {
 220		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
 221		tp->t_blk_res = 0;
 222	}
 223
 224	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 225
 226	return error;
 227}
 228
 229int
 230xfs_trans_alloc(
 231	struct xfs_mount	*mp,
 232	struct xfs_trans_res	*resp,
 233	uint			blocks,
 234	uint			rtextents,
 235	uint			flags,
 236	struct xfs_trans	**tpp)
 237{
 238	struct xfs_trans	*tp;
 239	int			error;
 240
 241	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
 242		sb_start_intwrite(mp->m_super);
 243
 244	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
 245	atomic_inc(&mp->m_active_trans);
 246
 247	tp = kmem_zone_zalloc(xfs_trans_zone,
 248		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
 249	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
 250	tp->t_flags = flags;
 251	tp->t_mountp = mp;
 252	INIT_LIST_HEAD(&tp->t_items);
 253	INIT_LIST_HEAD(&tp->t_busy);
 254
 255	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 256	if (error) {
 257		xfs_trans_cancel(tp);
 258		return error;
 259	}
 260
 261	*tpp = tp;
 262	return 0;
 263}
 264
 265/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 266 * Record the indicated change to the given field for application
 267 * to the file system's superblock when the transaction commits.
 268 * For now, just store the change in the transaction structure.
 269 *
 270 * Mark the transaction structure to indicate that the superblock
 271 * needs to be updated before committing.
 272 *
 273 * Because we may not be keeping track of allocated/free inodes and
 274 * used filesystem blocks in the superblock, we do not mark the
 275 * superblock dirty in this transaction if we modify these fields.
 276 * We still need to update the transaction deltas so that they get
 277 * applied to the incore superblock, but we don't want them to
 278 * cause the superblock to get locked and logged if these are the
 279 * only fields in the superblock that the transaction modifies.
 280 */
 281void
 282xfs_trans_mod_sb(
 283	xfs_trans_t	*tp,
 284	uint		field,
 285	int64_t		delta)
 286{
 287	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 288	xfs_mount_t	*mp = tp->t_mountp;
 289
 290	switch (field) {
 291	case XFS_TRANS_SB_ICOUNT:
 292		tp->t_icount_delta += delta;
 293		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 294			flags &= ~XFS_TRANS_SB_DIRTY;
 295		break;
 296	case XFS_TRANS_SB_IFREE:
 297		tp->t_ifree_delta += delta;
 298		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 299			flags &= ~XFS_TRANS_SB_DIRTY;
 300		break;
 301	case XFS_TRANS_SB_FDBLOCKS:
 302		/*
 303		 * Track the number of blocks allocated in the
 304		 * transaction.  Make sure it does not exceed the
 305		 * number reserved.
 306		 */
 307		if (delta < 0) {
 308			tp->t_blk_res_used += (uint)-delta;
 309			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
 
 310		}
 311		tp->t_fdblocks_delta += delta;
 312		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 313			flags &= ~XFS_TRANS_SB_DIRTY;
 314		break;
 315	case XFS_TRANS_SB_RES_FDBLOCKS:
 316		/*
 317		 * The allocation has already been applied to the
 318		 * in-core superblock's counter.  This should only
 319		 * be applied to the on-disk superblock.
 320		 */
 321		tp->t_res_fdblocks_delta += delta;
 322		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 323			flags &= ~XFS_TRANS_SB_DIRTY;
 324		break;
 325	case XFS_TRANS_SB_FREXTENTS:
 326		/*
 327		 * Track the number of blocks allocated in the
 328		 * transaction.  Make sure it does not exceed the
 329		 * number reserved.
 330		 */
 331		if (delta < 0) {
 332			tp->t_rtx_res_used += (uint)-delta;
 333			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 334		}
 335		tp->t_frextents_delta += delta;
 336		break;
 337	case XFS_TRANS_SB_RES_FREXTENTS:
 338		/*
 339		 * The allocation has already been applied to the
 340		 * in-core superblock's counter.  This should only
 341		 * be applied to the on-disk superblock.
 342		 */
 343		ASSERT(delta < 0);
 344		tp->t_res_frextents_delta += delta;
 345		break;
 346	case XFS_TRANS_SB_DBLOCKS:
 347		ASSERT(delta > 0);
 348		tp->t_dblocks_delta += delta;
 349		break;
 350	case XFS_TRANS_SB_AGCOUNT:
 351		ASSERT(delta > 0);
 352		tp->t_agcount_delta += delta;
 353		break;
 354	case XFS_TRANS_SB_IMAXPCT:
 355		tp->t_imaxpct_delta += delta;
 356		break;
 357	case XFS_TRANS_SB_REXTSIZE:
 358		tp->t_rextsize_delta += delta;
 359		break;
 360	case XFS_TRANS_SB_RBMBLOCKS:
 361		tp->t_rbmblocks_delta += delta;
 362		break;
 363	case XFS_TRANS_SB_RBLOCKS:
 364		tp->t_rblocks_delta += delta;
 365		break;
 366	case XFS_TRANS_SB_REXTENTS:
 367		tp->t_rextents_delta += delta;
 368		break;
 369	case XFS_TRANS_SB_REXTSLOG:
 370		tp->t_rextslog_delta += delta;
 371		break;
 372	default:
 373		ASSERT(0);
 374		return;
 375	}
 376
 377	tp->t_flags |= flags;
 378}
 379
 380/*
 381 * xfs_trans_apply_sb_deltas() is called from the commit code
 382 * to bring the superblock buffer into the current transaction
 383 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 384 *
 385 * For now we just look at each field allowed to change and change
 386 * it if necessary.
 387 */
 388STATIC void
 389xfs_trans_apply_sb_deltas(
 390	xfs_trans_t	*tp)
 391{
 392	xfs_dsb_t	*sbp;
 393	xfs_buf_t	*bp;
 394	int		whole = 0;
 395
 396	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
 397	sbp = XFS_BUF_TO_SBP(bp);
 398
 399	/*
 400	 * Check that superblock mods match the mods made to AGF counters.
 401	 */
 402	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 403	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 404		tp->t_ag_btree_delta));
 405
 406	/*
 407	 * Only update the superblock counters if we are logging them
 408	 */
 409	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 410		if (tp->t_icount_delta)
 411			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 412		if (tp->t_ifree_delta)
 413			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 414		if (tp->t_fdblocks_delta)
 415			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 416		if (tp->t_res_fdblocks_delta)
 417			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 418	}
 419
 420	if (tp->t_frextents_delta)
 421		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 422	if (tp->t_res_frextents_delta)
 423		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 424
 425	if (tp->t_dblocks_delta) {
 426		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 427		whole = 1;
 428	}
 429	if (tp->t_agcount_delta) {
 430		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 431		whole = 1;
 432	}
 433	if (tp->t_imaxpct_delta) {
 434		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 435		whole = 1;
 436	}
 437	if (tp->t_rextsize_delta) {
 438		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 439		whole = 1;
 440	}
 441	if (tp->t_rbmblocks_delta) {
 442		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 443		whole = 1;
 444	}
 445	if (tp->t_rblocks_delta) {
 446		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 447		whole = 1;
 448	}
 449	if (tp->t_rextents_delta) {
 450		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 451		whole = 1;
 452	}
 453	if (tp->t_rextslog_delta) {
 454		sbp->sb_rextslog += tp->t_rextslog_delta;
 455		whole = 1;
 456	}
 457
 458	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 459	if (whole)
 460		/*
 461		 * Log the whole thing, the fields are noncontiguous.
 462		 */
 463		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 464	else
 465		/*
 466		 * Since all the modifiable fields are contiguous, we
 467		 * can get away with this.
 468		 */
 469		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 470				  offsetof(xfs_dsb_t, sb_frextents) +
 471				  sizeof(sbp->sb_frextents) - 1);
 472}
 473
 474STATIC int
 475xfs_sb_mod8(
 476	uint8_t			*field,
 477	int8_t			delta)
 478{
 479	int8_t			counter = *field;
 480
 481	counter += delta;
 482	if (counter < 0) {
 483		ASSERT(0);
 484		return -EINVAL;
 485	}
 486	*field = counter;
 487	return 0;
 488}
 489
 490STATIC int
 491xfs_sb_mod32(
 492	uint32_t		*field,
 493	int32_t			delta)
 494{
 495	int32_t			counter = *field;
 496
 497	counter += delta;
 498	if (counter < 0) {
 499		ASSERT(0);
 500		return -EINVAL;
 501	}
 502	*field = counter;
 503	return 0;
 504}
 505
 506STATIC int
 507xfs_sb_mod64(
 508	uint64_t		*field,
 509	int64_t			delta)
 510{
 511	int64_t			counter = *field;
 512
 513	counter += delta;
 514	if (counter < 0) {
 515		ASSERT(0);
 516		return -EINVAL;
 517	}
 518	*field = counter;
 519	return 0;
 520}
 521
 522/*
 523 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 524 * and apply superblock counter changes to the in-core superblock.  The
 525 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 526 * applied to the in-core superblock.  The idea is that that has already been
 527 * done.
 528 *
 529 * If we are not logging superblock counters, then the inode allocated/free and
 530 * used block counts are not updated in the on disk superblock. In this case,
 531 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 532 * still need to update the incore superblock with the changes.
 533 */
 534void
 535xfs_trans_unreserve_and_mod_sb(
 536	struct xfs_trans	*tp)
 537{
 538	struct xfs_mount	*mp = tp->t_mountp;
 539	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 540	int64_t			blkdelta = 0;
 541	int64_t			rtxdelta = 0;
 542	int64_t			idelta = 0;
 543	int64_t			ifreedelta = 0;
 544	int			error;
 545
 546	/* calculate deltas */
 547	if (tp->t_blk_res > 0)
 548		blkdelta = tp->t_blk_res;
 549	if ((tp->t_fdblocks_delta != 0) &&
 550	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 551	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 552	        blkdelta += tp->t_fdblocks_delta;
 553
 554	if (tp->t_rtx_res > 0)
 555		rtxdelta = tp->t_rtx_res;
 556	if ((tp->t_frextents_delta != 0) &&
 557	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 558		rtxdelta += tp->t_frextents_delta;
 559
 560	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 561	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 562		idelta = tp->t_icount_delta;
 563		ifreedelta = tp->t_ifree_delta;
 564	}
 565
 566	/* apply the per-cpu counters */
 567	if (blkdelta) {
 568		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 569		if (error)
 570			goto out;
 571	}
 572
 573	if (idelta) {
 574		error = xfs_mod_icount(mp, idelta);
 575		if (error)
 576			goto out_undo_fdblocks;
 577	}
 578
 579	if (ifreedelta) {
 580		error = xfs_mod_ifree(mp, ifreedelta);
 581		if (error)
 582			goto out_undo_icount;
 583	}
 584
 585	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
 586		return;
 587
 588	/* apply remaining deltas */
 589	spin_lock(&mp->m_sb_lock);
 590	if (rtxdelta) {
 591		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
 592		if (error)
 593			goto out_undo_ifree;
 594	}
 595
 596	if (tp->t_dblocks_delta != 0) {
 597		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
 598		if (error)
 599			goto out_undo_frextents;
 600	}
 601	if (tp->t_agcount_delta != 0) {
 602		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 603		if (error)
 604			goto out_undo_dblocks;
 605	}
 606	if (tp->t_imaxpct_delta != 0) {
 607		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
 608		if (error)
 609			goto out_undo_agcount;
 610	}
 611	if (tp->t_rextsize_delta != 0) {
 612		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
 613				     tp->t_rextsize_delta);
 614		if (error)
 615			goto out_undo_imaxpct;
 616	}
 617	if (tp->t_rbmblocks_delta != 0) {
 618		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
 619				     tp->t_rbmblocks_delta);
 620		if (error)
 621			goto out_undo_rextsize;
 622	}
 623	if (tp->t_rblocks_delta != 0) {
 624		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
 625		if (error)
 626			goto out_undo_rbmblocks;
 627	}
 628	if (tp->t_rextents_delta != 0) {
 629		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
 630				     tp->t_rextents_delta);
 631		if (error)
 632			goto out_undo_rblocks;
 633	}
 634	if (tp->t_rextslog_delta != 0) {
 635		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
 636				     tp->t_rextslog_delta);
 637		if (error)
 638			goto out_undo_rextents;
 639	}
 640	spin_unlock(&mp->m_sb_lock);
 641	return;
 642
 643out_undo_rextents:
 644	if (tp->t_rextents_delta)
 645		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
 646out_undo_rblocks:
 647	if (tp->t_rblocks_delta)
 648		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
 649out_undo_rbmblocks:
 650	if (tp->t_rbmblocks_delta)
 651		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
 652out_undo_rextsize:
 653	if (tp->t_rextsize_delta)
 654		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
 655out_undo_imaxpct:
 656	if (tp->t_rextsize_delta)
 657		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
 658out_undo_agcount:
 659	if (tp->t_agcount_delta)
 660		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
 661out_undo_dblocks:
 662	if (tp->t_dblocks_delta)
 663		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
 664out_undo_frextents:
 665	if (rtxdelta)
 666		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
 667out_undo_ifree:
 668	spin_unlock(&mp->m_sb_lock);
 669	if (ifreedelta)
 670		xfs_mod_ifree(mp, -ifreedelta);
 671out_undo_icount:
 672	if (idelta)
 673		xfs_mod_icount(mp, -idelta);
 674out_undo_fdblocks:
 675	if (blkdelta)
 676		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 677out:
 678	ASSERT(error == 0);
 679	return;
 680}
 681
 682/*
 683 * Add the given log item to the transaction's list of log items.
 684 *
 685 * The log item will now point to its new descriptor with its li_desc field.
 686 */
 687void
 688xfs_trans_add_item(
 689	struct xfs_trans	*tp,
 690	struct xfs_log_item	*lip)
 691{
 692	struct xfs_log_item_desc *lidp;
 693
 694	ASSERT(lip->li_mountp == tp->t_mountp);
 695	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 696
 697	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
 698
 699	lidp->lid_item = lip;
 700	lidp->lid_flags = 0;
 701	list_add_tail(&lidp->lid_trans, &tp->t_items);
 702
 703	lip->li_desc = lidp;
 704}
 705
 706STATIC void
 707xfs_trans_free_item_desc(
 708	struct xfs_log_item_desc *lidp)
 709{
 710	list_del_init(&lidp->lid_trans);
 711	kmem_zone_free(xfs_log_item_desc_zone, lidp);
 712}
 713
 714/*
 715 * Unlink and free the given descriptor.
 716 */
 717void
 718xfs_trans_del_item(
 719	struct xfs_log_item	*lip)
 720{
 721	xfs_trans_free_item_desc(lip->li_desc);
 722	lip->li_desc = NULL;
 723}
 724
 725/*
 726 * Unlock all of the items of a transaction and free all the descriptors
 727 * of that transaction.
 728 */
 729void
 730xfs_trans_free_items(
 731	struct xfs_trans	*tp,
 732	xfs_lsn_t		commit_lsn,
 733	bool			abort)
 734{
 735	struct xfs_log_item_desc *lidp, *next;
 736
 737	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
 738		struct xfs_log_item	*lip = lidp->lid_item;
 739
 740		lip->li_desc = NULL;
 741
 742		if (commit_lsn != NULLCOMMITLSN)
 743			lip->li_ops->iop_committing(lip, commit_lsn);
 744		if (abort)
 745			lip->li_flags |= XFS_LI_ABORTED;
 746		lip->li_ops->iop_unlock(lip);
 747
 748		xfs_trans_free_item_desc(lidp);
 749	}
 750}
 751
 752static inline void
 753xfs_log_item_batch_insert(
 754	struct xfs_ail		*ailp,
 755	struct xfs_ail_cursor	*cur,
 756	struct xfs_log_item	**log_items,
 757	int			nr_items,
 758	xfs_lsn_t		commit_lsn)
 759{
 760	int	i;
 761
 762	spin_lock(&ailp->xa_lock);
 763	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
 764	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 765
 766	for (i = 0; i < nr_items; i++) {
 767		struct xfs_log_item *lip = log_items[i];
 768
 769		lip->li_ops->iop_unpin(lip, 0);
 770	}
 771}
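/*
 * The iop_unpin() calls above deliberately run after
 * xfs_trans_ail_update_bulk() has dropped xa_lock: unpinning can kick
 * off further completion work, which is better done without holding
 * the AIL lock.
 */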
 772
 773/*
 774 * Bulk operation version of xfs_trans_committed that takes a log vector of
 775 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 776 * minimise lock traffic.
 777 *
 778 * If we are called with the aborted flag set, it is because a log write during
 779 * a CIL checkpoint commit has failed. In this case, all the items in the
  780 * checkpoint have already gone through iop_committed and iop_unlock, which
 781 * means that checkpoint commit abort handling is treated exactly the same
 782 * as an iclog write error even though we haven't started any IO yet. Hence in
 783 * this case all we need to do is iop_committed processing, followed by an
 784 * iop_unpin(aborted) call.
 785 *
 786 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 787 * at the end of the AIL, the insert cursor avoids the need to walk
 788 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 789 * call. This saves a lot of needless list walking and is a net win, even
  790 * though it slightly increases the amount of AIL lock traffic to set it up
 791 * and tear it down.
 792 */
 793void
 794xfs_trans_committed_bulk(
 795	struct xfs_ail		*ailp,
 796	struct xfs_log_vec	*log_vector,
 797	xfs_lsn_t		commit_lsn,
 798	int			aborted)
 799{
 800#define LOG_ITEM_BATCH_SIZE	32
 801	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 802	struct xfs_log_vec	*lv;
 803	struct xfs_ail_cursor	cur;
 804	int			i = 0;
 805
 806	spin_lock(&ailp->xa_lock);
 807	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 808	spin_unlock(&ailp->xa_lock);
 809
 810	/* unpin all the log items */
  811	for (lv = log_vector; lv; lv = lv->lv_next) {
 812		struct xfs_log_item	*lip = lv->lv_item;
 813		xfs_lsn_t		item_lsn;
 814
 815		if (aborted)
 816			lip->li_flags |= XFS_LI_ABORTED;
 817		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 818
 819		/* item_lsn of -1 means the item needs no further processing */
 820		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 821			continue;
 822
 823		/*
 824		 * if we are aborting the operation, no point in inserting the
 825		 * object into the AIL as we are in a shutdown situation.
 826		 */
 827		if (aborted) {
 828			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
 829			lip->li_ops->iop_unpin(lip, 1);
 830			continue;
 831		}
 832
 833		if (item_lsn != commit_lsn) {
 834
 835			/*
  836			 * Not a bulk update candidate due to an unusual item_lsn.
 837			 * Push into AIL immediately, rechecking the lsn once
 838			 * we have the ail lock. Then unpin the item. This does
 839			 * not affect the AIL cursor the bulk insert path is
 840			 * using.
 841			 */
 842			spin_lock(&ailp->xa_lock);
 843			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 844				xfs_trans_ail_update(ailp, lip, item_lsn);
 845			else
 846				spin_unlock(&ailp->xa_lock);
 847			lip->li_ops->iop_unpin(lip, 0);
 848			continue;
 849		}
 850
 851		/* Item is a candidate for bulk AIL insert.  */
 852		log_items[i++] = lv->lv_item;
 853		if (i >= LOG_ITEM_BATCH_SIZE) {
 854			xfs_log_item_batch_insert(ailp, &cur, log_items,
 855					LOG_ITEM_BATCH_SIZE, commit_lsn);
 856			i = 0;
 857		}
 858	}
 859
 860	/* make sure we insert the remainder! */
 861	if (i)
 862		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 863
 864	spin_lock(&ailp->xa_lock);
 865	xfs_trans_ail_cursor_done(&cur);
 866	spin_unlock(&ailp->xa_lock);
 867}
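/*
 * To summarise the loop above: each committed item takes one of three
 * paths.  An aborted item is simply unpinned with the remove flag set;
 * an item whose iop_committed() returns an LSN other than commit_lsn is
 * pushed into the AIL immediately under xa_lock (rechecking the LSN);
 * everything else is gathered into log_items[] and inserted
 * LOG_ITEM_BATCH_SIZE entries at a time via xfs_log_item_batch_insert()
 * using the shared cursor.
 */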
 868
 869/*
 870 * Commit the given transaction to the log.
 871 *
  872 * The XFS disk error handling mechanism is not based on a typical
  873 * transaction abort mechanism. Logically, after the filesystem
  874 * gets marked 'SHUTDOWN', we can't let any new transactions
  875 * be durable - i.e. committed to disk - because some metadata might
 876 * be inconsistent. In such cases, this returns an error, and the
 877 * caller may assume that all locked objects joined to the transaction
 878 * have already been unlocked as if the commit had succeeded.
 879 * Do not reference the transaction structure after this call.
 880 */
 881static int
 882__xfs_trans_commit(
 883	struct xfs_trans	*tp,
 884	bool			regrant)
 885{
 886	struct xfs_mount	*mp = tp->t_mountp;
 887	xfs_lsn_t		commit_lsn = -1;
 888	int			error = 0;
 889	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 890
 891	/*
 892	 * If there is nothing to be logged by the transaction,
 893	 * then unlock all of the items associated with the
 894	 * transaction and free the transaction structure.
 895	 * Also make sure to return any reserved blocks to
 896	 * the free pool.
 897	 */
 898	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 899		goto out_unreserve;
 900
 901	if (XFS_FORCED_SHUTDOWN(mp)) {
 902		error = -EIO;
 903		goto out_unreserve;
 904	}
 905
 906	ASSERT(tp->t_ticket != NULL);
 907
 908	/*
 909	 * If we need to update the superblock, then do it now.
 910	 */
 911	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
 912		xfs_trans_apply_sb_deltas(tp);
 913	xfs_trans_apply_dquot_deltas(tp);
 914
 915	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
 916
 917	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 918	xfs_trans_free(tp);
 919
 920	/*
 921	 * If the transaction needs to be synchronous, then force the
 922	 * log out now and wait for it.
 923	 */
 924	if (sync) {
 925		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 926		XFS_STATS_INC(mp, xs_trans_sync);
 927	} else {
 928		XFS_STATS_INC(mp, xs_trans_async);
 929	}
 930
 931	return error;
 932
 933out_unreserve:
 934	xfs_trans_unreserve_and_mod_sb(tp);
 935
 936	/*
 937	 * It is indeed possible for the transaction to be not dirty but
 938	 * the dqinfo portion to be.  All that means is that we have some
 939	 * (non-persistent) quota reservations that need to be unreserved.
 940	 */
 941	xfs_trans_unreserve_and_mod_dquots(tp);
 942	if (tp->t_ticket) {
 943		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
 944		if (commit_lsn == -1 && !error)
 945			error = -EIO;
 946	}
 947	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 948	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
 949	xfs_trans_free(tp);
 950
 951	XFS_STATS_INC(mp, xs_trans_empty);
 952	return error;
 953}
 954
 955int
 956xfs_trans_commit(
 957	struct xfs_trans	*tp)
 958{
 959	return __xfs_trans_commit(tp, false);
 960}
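/*
 * A minimal sketch of the reserve/modify/commit cycle, with "ip" an
 * inode on mount "mp" and "tr_example" standing in for a real
 * reservation from M_RES(mp):
 *
 *	struct xfs_trans *tp;
 *	int error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_example, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	return xfs_trans_commit(tp);
 *
 * Note that commit (like cancel) unlocks the joined items, so the
 * caller must not touch tp or the ILOCK afterwards.
 */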
 961
 962/*
 963 * Unlock all of the transaction's items and free the transaction.
 964 * The transaction must not have modified any of its items, because
 965 * there is no way to restore them to their previous state.
 966 *
 967 * If the transaction has made a log reservation, make sure to release
 968 * it as well.
 969 */
 970void
 971xfs_trans_cancel(
 972	struct xfs_trans	*tp)
 973{
 974	struct xfs_mount	*mp = tp->t_mountp;
 975	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
 976
 977	/*
 978	 * See if the caller is relying on us to shut down the
 979	 * filesystem.  This happens in paths where we detect
 980	 * corruption and decide to give up.
 981	 */
 982	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
 983		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
 984		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 985	}
 986#ifdef DEBUG
 987	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
 988		struct xfs_log_item_desc *lidp;
 989
 990		list_for_each_entry(lidp, &tp->t_items, lid_trans)
 991			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
 992	}
 993#endif
 994	xfs_trans_unreserve_and_mod_sb(tp);
 995	xfs_trans_unreserve_and_mod_dquots(tp);
 996
 997	if (tp->t_ticket)
 998		xfs_log_done(mp, tp->t_ticket, NULL, false);
 999
1000	/* mark this thread as no longer being in a transaction */
1001	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1002
1003	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
1004	xfs_trans_free(tp);
1005}
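/*
 * This pairs with the commit sketch above: once allocated, a
 * transaction must leave through exactly one of xfs_trans_commit()
 * or xfs_trans_cancel(), e.g. (do_step being hypothetical):
 *
 *	error = do_step(tp);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 */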
1006
1007/*
1008 * Roll from one trans in the sequence of PERMANENT transactions to
1009 * the next: permanent transactions are only flushed out when
 1010 * committed with xfs_trans_commit(), but we still want to let
 1011 * chunks of it go to the log as soon as possible. So we commit the
1012 * chunk we've been working on and get a new transaction to continue.
1013 */
1014int
1015__xfs_trans_roll(
1016	struct xfs_trans	**tpp,
1017	struct xfs_inode	*dp,
1018	int			*committed)
1019{
1020	struct xfs_trans	*trans;
1021	struct xfs_trans_res	tres;
1022	int			error;
1023
1024	*committed = 0;
1025
1026	/*
1027	 * Ensure that the inode is always logged.
1028	 */
1029	trans = *tpp;
1030	if (dp)
1031		xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
1032
1033	/*
1034	 * Copy the critical parameters from one trans to the next.
1035	 */
1036	tres.tr_logres = trans->t_log_res;
1037	tres.tr_logcount = trans->t_log_count;
1038	*tpp = xfs_trans_dup(trans);
1039
1040	/*
1041	 * Commit the current transaction.
 1042	 * If this commit fails, it will just unlock those items that are
 1043	 * not marked ihold. That also means that a filesystem shutdown
1044	 * is in progress. The caller takes the responsibility to cancel
1045	 * the duplicate transaction that gets returned.
1046	 */
1047	error = __xfs_trans_commit(trans, true);
1048	if (error)
1049		return error;
1050
1051	*committed = 1;
1052	trans = *tpp;
1053
1054	/*
 1055	 * Reserve space in the log for the next transaction.
1056	 * This also pushes items in the "AIL", the list of logged items,
1057	 * out to disk if they are taking up space at the tail of the log
1058	 * that we want to use.  This requires that either nothing be locked
1059	 * across this call, or that anything that is locked be logged in
1060	 * the prior and the next transactions.
1061	 */
1062	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1063	error = xfs_trans_reserve(trans, &tres, 0, 0);
1064	/*
 1065	 * Ensure that the inode is in the new transaction and locked.
1066	 */
1067	if (error)
1068		return error;
1069
1070	if (dp)
1071		xfs_trans_ijoin(trans, dp, 0);
1072	return 0;
1073}
1074
1075int
1076xfs_trans_roll(
1077	struct xfs_trans	**tpp,
1078	struct xfs_inode	*dp)
1079{
1080	int			committed;
1081	return __xfs_trans_roll(tpp, dp, &committed);
1082}
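/*
 * A minimal sketch of rolling a permanent transaction across a
 * multi-step change, with do_one_chunk() a hypothetical step that
 * dirties the transaction:
 *
 *	do {
 *		error = do_one_chunk(tp, ip);
 *		if (error)
 *			break;
 *		error = xfs_trans_roll(&tp, ip);
 *	} while (!error && more_work);
 *
 * Each roll commits the chunk logged so far with a regranted log
 * reservation and returns a fresh transaction in *tpp, relogging and
 * rejoining the inode so it stays locked across the whole sequence.
 */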