/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"

kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * A permanent log reservation, as used by long running transactions, is
 * requested by setting XFS_TRANS_PERM_LOG_RES in resp->tr_logflags.  If
 * any one of the reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
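
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs xfs_trans_alloc() with xfs_trans_commit() on success and
 * xfs_trans_cancel() on failure.  The tr_ichange reservation and the
 * inode "ip" below are stand-ins for whatever the caller modifies:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);
 */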

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
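
/*
 * Illustrative sketch, not part of the original file: block allocation
 * paths feed their accounting through xfs_trans_mod_sb(), e.g. consuming
 * "nblocks" (a placeholder) from the reservation taken at transaction
 * allocation time:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((int64_t)nblocks));
 */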

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}
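
/*
 * Worked example, illustrative only: the xfs_sb_mod*() helpers refuse to
 * let a counter go negative and leave the field untouched on failure:
 *
 *	uint64_t	free = 10;
 *
 *	xfs_sb_mod64(&free, -25);	returns -EINVAL, free is still 10
 *	xfs_sb_mod64(&free, -10);	returns 0, free is now 0
 */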

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that it has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				     tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}
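
/*
 * Illustrative sketch, not part of the original file: log item code joins
 * an object to a transaction with xfs_trans_add_item() and detaches it
 * with xfs_trans_del_item(), e.g. for a buf log item "bip":
 *
 *	xfs_trans_add_item(tp, &bip->bli_item);
 *	...
 *	xfs_trans_del_item(&bip->bli_item);
 */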

/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	bool			abort)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * XFS's disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
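
/*
 * Illustrative note, not part of the original file: a caller that needs
 * the commit to be durable before returning sets XFS_TRANS_SYNC on the
 * transaction (e.g. via the xfs_trans_set_sync() helper) so the commit
 * path above forces the log to commit_lsn and waits for it:
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 */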

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket)
		xfs_log_done(mp, tp->t_ticket, NULL, false);

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let
 * chunks of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
__xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp,
	int			*committed)
{
	struct xfs_trans	*trans;
	struct xfs_trans_res	tres;
	int			error;

	*committed = 0;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	if (dp)
		xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	*committed = 1;
	trans = *tpp;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);
	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	if (error)
		return error;

	if (dp)
		xfs_trans_ijoin(trans, dp, 0);
	return 0;
}

int
xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp)
{
	int			committed;

	return __xfs_trans_roll(tpp, dp, &committed);
}
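
/*
 * Illustrative sketch, not part of the original file: a long-running
 * operation rolls its permanent transaction between steps so each chunk
 * can reach the log while the joined inode stays locked and attached:
 *
 *	error = xfs_trans_roll(&tp, ip);
 *	if (error)
 *		goto out_cancel;
 *	... keep working with the same tp pointer, now a fresh transaction ...
 */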
v4.6
   1/*
   2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   3 * Copyright (C) 2010 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_mount.h"
  26#include "xfs_inode.h"
  27#include "xfs_extent_busy.h"
  28#include "xfs_quota.h"
  29#include "xfs_trans.h"
  30#include "xfs_trans_priv.h"
  31#include "xfs_log.h"
  32#include "xfs_trace.h"
  33#include "xfs_error.h"
  34
  35kmem_zone_t	*xfs_trans_zone;
  36kmem_zone_t	*xfs_log_item_desc_zone;
  37
  38/*
  39 * Initialize the precomputed transaction reservation values
  40 * in the mount structure.
  41 */
  42void
  43xfs_trans_init(
  44	struct xfs_mount	*mp)
  45{
  46	xfs_trans_resv_calc(mp, M_RES(mp));
  47}
  48
  49/*
  50 * This routine is called to allocate a transaction structure.
  51 * The type parameter indicates the type of the transaction.  These
  52 * are enumerated in xfs_trans.h.
  53 *
  54 * Dynamically allocate the transaction structure from the transaction
  55 * zone, initialize it, and return it to the caller.
  56 */
  57xfs_trans_t *
  58xfs_trans_alloc(
  59	xfs_mount_t	*mp,
  60	uint		type)
  61{
  62	xfs_trans_t     *tp;
  63
  64	sb_start_intwrite(mp->m_super);
  65	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
  66	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
  67	return tp;
  68}
  69
  70xfs_trans_t *
  71_xfs_trans_alloc(
  72	xfs_mount_t	*mp,
  73	uint		type,
  74	xfs_km_flags_t	memflags)
  75{
  76	xfs_trans_t	*tp;
  77
  78	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
  79	atomic_inc(&mp->m_active_trans);
  80
  81	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
  82	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
  83	tp->t_type = type;
  84	tp->t_mountp = mp;
  85	INIT_LIST_HEAD(&tp->t_items);
  86	INIT_LIST_HEAD(&tp->t_busy);
  87	return tp;
  88}
  89
  90/*
  91 * Free the transaction structure.  If there is more clean up
  92 * to do when the structure is freed, add it here.
  93 */
  94STATIC void
  95xfs_trans_free(
  96	struct xfs_trans	*tp)
  97{
  98	xfs_extent_busy_sort(&tp->t_busy);
  99	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
 100
 101	atomic_dec(&tp->t_mountp->m_active_trans);
 102	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
 103		sb_end_intwrite(tp->t_mountp->m_super);
 104	xfs_trans_free_dqinfo(tp);
 105	kmem_zone_free(xfs_trans_zone, tp);
 106}
 107
 108/*
 109 * This is called to create a new transaction which will share the
 110 * permanent log reservation of the given transaction.  The remaining
 111 * unused block and rt extent reservations are also inherited.  This
 112 * implies that the original transaction is no longer allowed to allocate
 113 * blocks.  Locks and log items, however, are no inherited.  They must
 114 * be added to the new transaction explicitly.
 115 */
 116STATIC xfs_trans_t *
 117xfs_trans_dup(
 118	xfs_trans_t	*tp)
 119{
 120	xfs_trans_t	*ntp;
 121
 122	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
 123
 124	/*
 125	 * Initialize the new transaction structure.
 126	 */
 127	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
 128	ntp->t_type = tp->t_type;
 129	ntp->t_mountp = tp->t_mountp;
 130	INIT_LIST_HEAD(&ntp->t_items);
 131	INIT_LIST_HEAD(&ntp->t_busy);
 132
 133	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 134	ASSERT(tp->t_ticket != NULL);
 135
 136	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 137		       (tp->t_flags & XFS_TRANS_RESERVE) |
 138		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
 139	/* We gave our writer reference to the new transaction */
 140	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
 141	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 142	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 143	tp->t_blk_res = tp->t_blk_res_used;
 144	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 145	tp->t_rtx_res = tp->t_rtx_res_used;
 146	ntp->t_pflags = tp->t_pflags;
 147
 148	xfs_trans_dup_dqinfo(tp, ntp);
 149
 150	atomic_inc(&tp->t_mountp->m_active_trans);
 151	return ntp;
 152}
 153
 154/*
 155 * This is called to reserve free disk blocks and log space for the
 156 * given transaction.  This must be done before allocating any resources
 157 * within the transaction.
 158 *
 159 * This will return ENOSPC if there are not enough blocks available.
 160 * It will sleep waiting for available log space.
 161 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 162 * is used by long running transactions.  If any one of the reservations
 163 * fails then they will all be backed out.
 164 *
 165 * This does not do quota reservations. That typically is done by the
 166 * caller afterwards.
 167 */
 168int
 169xfs_trans_reserve(
 170	struct xfs_trans	*tp,
 171	struct xfs_trans_res	*resp,
 172	uint			blocks,
 173	uint			rtextents)
 174{
 175	int		error = 0;
 176	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 177
 178	/* Mark this thread as being in a transaction */
 179	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
 180
 181	/*
 182	 * Attempt to reserve the needed disk blocks by decrementing
 183	 * the number needed from the number available.  This will
 184	 * fail if the count would go below zero.
 185	 */
 186	if (blocks > 0) {
 187		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 188		if (error != 0) {
 189			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 190			return -ENOSPC;
 191		}
 192		tp->t_blk_res += blocks;
 193	}
 194
 195	/*
 196	 * Reserve the log space needed for this transaction.
 197	 */
 198	if (resp->tr_logres > 0) {
 199		bool	permanent = false;
 200
 201		ASSERT(tp->t_log_res == 0 ||
 202		       tp->t_log_res == resp->tr_logres);
 203		ASSERT(tp->t_log_count == 0 ||
 204		       tp->t_log_count == resp->tr_logcount);
 205
 206		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 207			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 208			permanent = true;
 209		} else {
 210			ASSERT(tp->t_ticket == NULL);
 211			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 212		}
 213
 214		if (tp->t_ticket != NULL) {
 215			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 216			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
 217		} else {
 218			error = xfs_log_reserve(tp->t_mountp,
 219						resp->tr_logres,
 220						resp->tr_logcount,
 221						&tp->t_ticket, XFS_TRANSACTION,
 222						permanent, tp->t_type);
 223		}
 224
 225		if (error)
 226			goto undo_blocks;
 227
 228		tp->t_log_res = resp->tr_logres;
 229		tp->t_log_count = resp->tr_logcount;
 230	}
 231
 232	/*
 233	 * Attempt to reserve the needed realtime extents by decrementing
 234	 * the number needed from the number available.  This will
 235	 * fail if the count would go below zero.
 236	 */
 237	if (rtextents > 0) {
 238		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 239		if (error) {
 240			error = -ENOSPC;
 241			goto undo_log;
 242		}
 243		tp->t_rtx_res += rtextents;
 244	}
 245
 246	return 0;
 247
 248	/*
 249	 * Error cases jump to one of these labels to undo any
 250	 * reservations which have already been performed.
 251	 */
 252undo_log:
 253	if (resp->tr_logres > 0) {
 254		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
 255		tp->t_ticket = NULL;
 256		tp->t_log_res = 0;
 257		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 258	}
 259
 260undo_blocks:
 261	if (blocks > 0) {
 262		xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 263		tp->t_blk_res = 0;
 264	}
 265
 266	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 267
 268	return error;
 269}
 270
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 271/*
 272 * Record the indicated change to the given field for application
 273 * to the file system's superblock when the transaction commits.
 274 * For now, just store the change in the transaction structure.
 275 *
 276 * Mark the transaction structure to indicate that the superblock
 277 * needs to be updated before committing.
 278 *
 279 * Because we may not be keeping track of allocated/free inodes and
 280 * used filesystem blocks in the superblock, we do not mark the
 281 * superblock dirty in this transaction if we modify these fields.
 282 * We still need to update the transaction deltas so that they get
 283 * applied to the incore superblock, but we don't want them to
 284 * cause the superblock to get locked and logged if these are the
 285 * only fields in the superblock that the transaction modifies.
 286 */
 287void
 288xfs_trans_mod_sb(
 289	xfs_trans_t	*tp,
 290	uint		field,
 291	int64_t		delta)
 292{
 293	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 294	xfs_mount_t	*mp = tp->t_mountp;
 295
 296	switch (field) {
 297	case XFS_TRANS_SB_ICOUNT:
 298		tp->t_icount_delta += delta;
 299		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 300			flags &= ~XFS_TRANS_SB_DIRTY;
 301		break;
 302	case XFS_TRANS_SB_IFREE:
 303		tp->t_ifree_delta += delta;
 304		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 305			flags &= ~XFS_TRANS_SB_DIRTY;
 306		break;
 307	case XFS_TRANS_SB_FDBLOCKS:
 308		/*
 309		 * Track the number of blocks allocated in the
 310		 * transaction.  Make sure it does not exceed the
 311		 * number reserved.
 312		 */
 313		if (delta < 0) {
 314			tp->t_blk_res_used += (uint)-delta;
 315			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
 316		}
 317		tp->t_fdblocks_delta += delta;
 318		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 319			flags &= ~XFS_TRANS_SB_DIRTY;
 320		break;
 321	case XFS_TRANS_SB_RES_FDBLOCKS:
 322		/*
 323		 * The allocation has already been applied to the
 324		 * in-core superblock's counter.  This should only
 325		 * be applied to the on-disk superblock.
 326		 */
 327		ASSERT(delta < 0);
 328		tp->t_res_fdblocks_delta += delta;
 329		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 330			flags &= ~XFS_TRANS_SB_DIRTY;
 331		break;
 332	case XFS_TRANS_SB_FREXTENTS:
 333		/*
 334		 * Track the number of blocks allocated in the
 335		 * transaction.  Make sure it does not exceed the
 336		 * number reserved.
 337		 */
 338		if (delta < 0) {
 339			tp->t_rtx_res_used += (uint)-delta;
 340			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 341		}
 342		tp->t_frextents_delta += delta;
 343		break;
 344	case XFS_TRANS_SB_RES_FREXTENTS:
 345		/*
 346		 * The allocation has already been applied to the
 347		 * in-core superblock's counter.  This should only
 348		 * be applied to the on-disk superblock.
 349		 */
 350		ASSERT(delta < 0);
 351		tp->t_res_frextents_delta += delta;
 352		break;
 353	case XFS_TRANS_SB_DBLOCKS:
 354		ASSERT(delta > 0);
 355		tp->t_dblocks_delta += delta;
 356		break;
 357	case XFS_TRANS_SB_AGCOUNT:
 358		ASSERT(delta > 0);
 359		tp->t_agcount_delta += delta;
 360		break;
 361	case XFS_TRANS_SB_IMAXPCT:
 362		tp->t_imaxpct_delta += delta;
 363		break;
 364	case XFS_TRANS_SB_REXTSIZE:
 365		tp->t_rextsize_delta += delta;
 366		break;
 367	case XFS_TRANS_SB_RBMBLOCKS:
 368		tp->t_rbmblocks_delta += delta;
 369		break;
 370	case XFS_TRANS_SB_RBLOCKS:
 371		tp->t_rblocks_delta += delta;
 372		break;
 373	case XFS_TRANS_SB_REXTENTS:
 374		tp->t_rextents_delta += delta;
 375		break;
 376	case XFS_TRANS_SB_REXTSLOG:
 377		tp->t_rextslog_delta += delta;
 378		break;
 379	default:
 380		ASSERT(0);
 381		return;
 382	}
 383
 384	tp->t_flags |= flags;
 385}
 386
 387/*
 388 * xfs_trans_apply_sb_deltas() is called from the commit code
 389 * to bring the superblock buffer into the current transaction
 390 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 391 *
 392 * For now we just look at each field allowed to change and change
 393 * it if necessary.
 394 */
 395STATIC void
 396xfs_trans_apply_sb_deltas(
 397	xfs_trans_t	*tp)
 398{
 399	xfs_dsb_t	*sbp;
 400	xfs_buf_t	*bp;
 401	int		whole = 0;
 402
 403	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
 404	sbp = XFS_BUF_TO_SBP(bp);
 405
 406	/*
 407	 * Check that superblock mods match the mods made to AGF counters.
 408	 */
 409	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 410	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 411		tp->t_ag_btree_delta));
 412
 413	/*
 414	 * Only update the superblock counters if we are logging them
 415	 */
 416	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 417		if (tp->t_icount_delta)
 418			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 419		if (tp->t_ifree_delta)
 420			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 421		if (tp->t_fdblocks_delta)
 422			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 423		if (tp->t_res_fdblocks_delta)
 424			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 425	}
 426
 427	if (tp->t_frextents_delta)
 428		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 429	if (tp->t_res_frextents_delta)
 430		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 431
 432	if (tp->t_dblocks_delta) {
 433		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 434		whole = 1;
 435	}
 436	if (tp->t_agcount_delta) {
 437		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 438		whole = 1;
 439	}
 440	if (tp->t_imaxpct_delta) {
 441		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 442		whole = 1;
 443	}
 444	if (tp->t_rextsize_delta) {
 445		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 446		whole = 1;
 447	}
 448	if (tp->t_rbmblocks_delta) {
 449		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 450		whole = 1;
 451	}
 452	if (tp->t_rblocks_delta) {
 453		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 454		whole = 1;
 455	}
 456	if (tp->t_rextents_delta) {
 457		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 458		whole = 1;
 459	}
 460	if (tp->t_rextslog_delta) {
 461		sbp->sb_rextslog += tp->t_rextslog_delta;
 462		whole = 1;
 463	}
 464
 465	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 466	if (whole)
 467		/*
 468		 * Log the whole thing, the fields are noncontiguous.
 469		 */
 470		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 471	else
 472		/*
 473		 * Since all the modifiable fields are contiguous, we
 474		 * can get away with this.
 475		 */
 476		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 477				  offsetof(xfs_dsb_t, sb_frextents) +
 478				  sizeof(sbp->sb_frextents) - 1);
 479}
 480
 481STATIC int
 482xfs_sb_mod8(
 483	uint8_t			*field,
 484	int8_t			delta)
 485{
 486	int8_t			counter = *field;
 487
 488	counter += delta;
 489	if (counter < 0) {
 490		ASSERT(0);
 491		return -EINVAL;
 492	}
 493	*field = counter;
 494	return 0;
 495}
 496
 497STATIC int
 498xfs_sb_mod32(
 499	uint32_t		*field,
 500	int32_t			delta)
 501{
 502	int32_t			counter = *field;
 503
 504	counter += delta;
 505	if (counter < 0) {
 506		ASSERT(0);
 507		return -EINVAL;
 508	}
 509	*field = counter;
 510	return 0;
 511}
 512
 513STATIC int
 514xfs_sb_mod64(
 515	uint64_t		*field,
 516	int64_t			delta)
 517{
 518	int64_t			counter = *field;
 519
 520	counter += delta;
 521	if (counter < 0) {
 522		ASSERT(0);
 523		return -EINVAL;
 524	}
 525	*field = counter;
 526	return 0;
 527}
 528
 529/*
 530 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 531 * and apply superblock counter changes to the in-core superblock.  The
 532 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 533 * applied to the in-core superblock.  The idea is that that has already been
 534 * done.
 535 *
 536 * If we are not logging superblock counters, then the inode allocated/free and
 537 * used block counts are not updated in the on disk superblock. In this case,
 538 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 539 * still need to update the incore superblock with the changes.
 540 */
 541void
 542xfs_trans_unreserve_and_mod_sb(
 543	struct xfs_trans	*tp)
 544{
 545	struct xfs_mount	*mp = tp->t_mountp;
 546	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 547	int64_t			blkdelta = 0;
 548	int64_t			rtxdelta = 0;
 549	int64_t			idelta = 0;
 550	int64_t			ifreedelta = 0;
 551	int			error;
 552
 553	/* calculate deltas */
 554	if (tp->t_blk_res > 0)
 555		blkdelta = tp->t_blk_res;
 556	if ((tp->t_fdblocks_delta != 0) &&
 557	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 558	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 559	        blkdelta += tp->t_fdblocks_delta;
 560
 561	if (tp->t_rtx_res > 0)
 562		rtxdelta = tp->t_rtx_res;
 563	if ((tp->t_frextents_delta != 0) &&
 564	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 565		rtxdelta += tp->t_frextents_delta;
 566
 567	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 568	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 569		idelta = tp->t_icount_delta;
 570		ifreedelta = tp->t_ifree_delta;
 571	}
 572
 573	/* apply the per-cpu counters */
 574	if (blkdelta) {
 575		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 576		if (error)
 577			goto out;
 578	}
 579
 580	if (idelta) {
 581		error = xfs_mod_icount(mp, idelta);
 582		if (error)
 583			goto out_undo_fdblocks;
 584	}
 585
 586	if (ifreedelta) {
 587		error = xfs_mod_ifree(mp, ifreedelta);
 588		if (error)
 589			goto out_undo_icount;
 590	}
 591
 592	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
 593		return;
 594
 595	/* apply remaining deltas */
 596	spin_lock(&mp->m_sb_lock);
 597	if (rtxdelta) {
 598		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
 599		if (error)
 600			goto out_undo_ifree;
 601	}
 602
 603	if (tp->t_dblocks_delta != 0) {
 604		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
 605		if (error)
 606			goto out_undo_frextents;
 607	}
 608	if (tp->t_agcount_delta != 0) {
 609		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 610		if (error)
 611			goto out_undo_dblocks;
 612	}
 613	if (tp->t_imaxpct_delta != 0) {
 614		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
 615		if (error)
 616			goto out_undo_agcount;
 617	}
 618	if (tp->t_rextsize_delta != 0) {
 619		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
 620				     tp->t_rextsize_delta);
 621		if (error)
 622			goto out_undo_imaxpct;
 623	}
 624	if (tp->t_rbmblocks_delta != 0) {
 625		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
 626				     tp->t_rbmblocks_delta);
 627		if (error)
 628			goto out_undo_rextsize;
 629	}
 630	if (tp->t_rblocks_delta != 0) {
 631		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
 632		if (error)
 633			goto out_undo_rbmblocks;
 634	}
 635	if (tp->t_rextents_delta != 0) {
 636		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
 637				     tp->t_rextents_delta);
 638		if (error)
 639			goto out_undo_rblocks;
 640	}
 641	if (tp->t_rextslog_delta != 0) {
 642		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
 643				     tp->t_rextslog_delta);
 644		if (error)
 645			goto out_undo_rextents;
 646	}
 647	spin_unlock(&mp->m_sb_lock);
 648	return;
 649
 650out_undo_rextents:
 651	if (tp->t_rextents_delta)
 652		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
 653out_undo_rblocks:
 654	if (tp->t_rblocks_delta)
 655		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
 656out_undo_rbmblocks:
 657	if (tp->t_rbmblocks_delta)
 658		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
 659out_undo_rextsize:
 660	if (tp->t_rextsize_delta)
 661		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
 662out_undo_imaxpct:
 663	if (tp->t_rextsize_delta)
 664		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
 665out_undo_agcount:
 666	if (tp->t_agcount_delta)
 667		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
 668out_undo_dblocks:
 669	if (tp->t_dblocks_delta)
 670		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
 671out_undo_frextents:
 672	if (rtxdelta)
 673		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
 674out_undo_ifree:
 675	spin_unlock(&mp->m_sb_lock);
 676	if (ifreedelta)
 677		xfs_mod_ifree(mp, -ifreedelta);
 678out_undo_icount:
 679	if (idelta)
 680		xfs_mod_icount(mp, -idelta);
 681out_undo_fdblocks:
 682	if (blkdelta)
 683		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 684out:
 685	ASSERT(error == 0);
 686	return;
 687}
 688
 689/*
 690 * Add the given log item to the transaction's list of log items.
 691 *
 692 * The log item will now point to its new descriptor with its li_desc field.
 693 */
 694void
 695xfs_trans_add_item(
 696	struct xfs_trans	*tp,
 697	struct xfs_log_item	*lip)
 698{
 699	struct xfs_log_item_desc *lidp;
 700
 701	ASSERT(lip->li_mountp == tp->t_mountp);
 702	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 703
 704	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
 705
 706	lidp->lid_item = lip;
 707	lidp->lid_flags = 0;
 708	list_add_tail(&lidp->lid_trans, &tp->t_items);
 709
 710	lip->li_desc = lidp;
 711}
 712
 713STATIC void
 714xfs_trans_free_item_desc(
 715	struct xfs_log_item_desc *lidp)
 716{
 717	list_del_init(&lidp->lid_trans);
 718	kmem_zone_free(xfs_log_item_desc_zone, lidp);
 719}
 720
 721/*
 722 * Unlink and free the given descriptor.
 723 */
 724void
 725xfs_trans_del_item(
 726	struct xfs_log_item	*lip)
 727{
 728	xfs_trans_free_item_desc(lip->li_desc);
 729	lip->li_desc = NULL;
 730}
 731
 732/*
 733 * Unlock all of the items of a transaction and free all the descriptors
 734 * of that transaction.
 735 */
 736void
 737xfs_trans_free_items(
 738	struct xfs_trans	*tp,
 739	xfs_lsn_t		commit_lsn,
 740	bool			abort)
 741{
 742	struct xfs_log_item_desc *lidp, *next;
 743
 744	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
 745		struct xfs_log_item	*lip = lidp->lid_item;
 746
 747		lip->li_desc = NULL;
 748
 749		if (commit_lsn != NULLCOMMITLSN)
 750			lip->li_ops->iop_committing(lip, commit_lsn);
 751		if (abort)
 752			lip->li_flags |= XFS_LI_ABORTED;
 753		lip->li_ops->iop_unlock(lip);
 754
 755		xfs_trans_free_item_desc(lidp);
 756	}
 757}
 758
 759static inline void
 760xfs_log_item_batch_insert(
 761	struct xfs_ail		*ailp,
 762	struct xfs_ail_cursor	*cur,
 763	struct xfs_log_item	**log_items,
 764	int			nr_items,
 765	xfs_lsn_t		commit_lsn)
 766{
 767	int	i;
 768
 769	spin_lock(&ailp->xa_lock);
 770	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
 771	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 772
 773	for (i = 0; i < nr_items; i++) {
 774		struct xfs_log_item *lip = log_items[i];
 775
 776		lip->li_ops->iop_unpin(lip, 0);
 777	}
 778}
 779
 780/*
 781 * Bulk operation version of xfs_trans_committed that takes a log vector of
 782 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 783 * minimise lock traffic.
 784 *
 785 * If we are called with the aborted flag set, it is because a log write during
 786 * a CIL checkpoint commit has failed. In this case, all the items in the
 787 * checkpoint have already gone through iop_commited and iop_unlock, which
 788 * means that checkpoint commit abort handling is treated exactly the same
 789 * as an iclog write error even though we haven't started any IO yet. Hence in
 790 * this case all we need to do is iop_committed processing, followed by an
 791 * iop_unpin(aborted) call.
 792 *
 793 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 794 * at the end of the AIL, the insert cursor avoids the need to walk
 795 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 796 * call. This saves a lot of needless list walking and is a net win, even
 797 * though it slightly increases the amount of AIL lock traffic to set it up
 798 * and tear it down.
 799 */
 800void
 801xfs_trans_committed_bulk(
 802	struct xfs_ail		*ailp,
 803	struct xfs_log_vec	*log_vector,
 804	xfs_lsn_t		commit_lsn,
 805	int			aborted)
 806{
 807#define LOG_ITEM_BATCH_SIZE	32
 808	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 809	struct xfs_log_vec	*lv;
 810	struct xfs_ail_cursor	cur;
 811	int			i = 0;
 812
 813	spin_lock(&ailp->xa_lock);
 814	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 815	spin_unlock(&ailp->xa_lock);
 816
 817	/* unpin all the log items */
 818	for (lv = log_vector; lv; lv = lv->lv_next) {
 819		struct xfs_log_item	*lip = lv->lv_item;
 820		xfs_lsn_t		item_lsn;
 821
 822		if (aborted)
 823			lip->li_flags |= XFS_LI_ABORTED;
 824		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 825
 826		/* item_lsn of -1 means the item needs no further processing */
 827		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 828			continue;
 829
 830		/*
 831		 * if we are aborting the operation, no point in inserting the
 832		 * object into the AIL as we are in a shutdown situation.
 833		 */
 834		if (aborted) {
 835			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
 836			lip->li_ops->iop_unpin(lip, 1);
 837			continue;
 838		}
 839
 840		if (item_lsn != commit_lsn) {
 841
 842			/*
 843			 * Not a bulk update candidate due to the unusual
 844			 * item_lsn.  Push into the AIL immediately, rechecking
 845			 * the lsn once we have the ail lock.  Then unpin the
 846			 * item.  This does not affect the AIL cursor that the
 847			 * bulk insert path is using.
 848			 */
 849			spin_lock(&ailp->xa_lock);
 850			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 851				xfs_trans_ail_update(ailp, lip, item_lsn);
 852			else
 853				spin_unlock(&ailp->xa_lock);
 854			lip->li_ops->iop_unpin(lip, 0);
 855			continue;
 856		}
 857
 858		/* Item is a candidate for bulk AIL insert.  */
 859		log_items[i++] = lv->lv_item;
 860		if (i >= LOG_ITEM_BATCH_SIZE) {
 861			xfs_log_item_batch_insert(ailp, &cur, log_items,
 862					LOG_ITEM_BATCH_SIZE, commit_lsn);
 863			i = 0;
 864		}
 865	}
 866
 867	/* make sure we insert the remainder! */
 868	if (i)
 869		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 870
 871	spin_lock(&ailp->xa_lock);
 872	xfs_trans_ail_cursor_done(&cur);
 873	spin_unlock(&ailp->xa_lock);
 874}
 875
 876/*
 877 * Commit the given transaction to the log.
 878 *
 879 * XFS's disk error handling is not based on a typical
 880 * transaction abort mechanism.  Logically, after the filesystem
 881 * gets marked 'SHUTDOWN', we can't let any new transactions
 882 * become durable - i.e. committed to disk - because some metadata
 883 * might be inconsistent.  In such cases, this returns an error, and the
 884 * caller may assume that all locked objects joined to the transaction
 885 * have already been unlocked as if the commit had succeeded.
 886 * Do not reference the transaction structure after this call.
 887 */
 888static int
 889__xfs_trans_commit(
 890	struct xfs_trans	*tp,
 891	bool			regrant)
 892{
 893	struct xfs_mount	*mp = tp->t_mountp;
 894	xfs_lsn_t		commit_lsn = -1;
 895	int			error = 0;
 896	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 897
 898	/*
 899	 * If there is nothing to be logged by the transaction,
 900	 * then unlock all of the items associated with the
 901	 * transaction and free the transaction structure.
 902	 * Also make sure to return any reserved blocks to
 903	 * the free pool.
 904	 */
 905	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 906		goto out_unreserve;
 907
 908	if (XFS_FORCED_SHUTDOWN(mp)) {
 909		error = -EIO;
 910		goto out_unreserve;
 911	}
 912
 913	ASSERT(tp->t_ticket != NULL);
 914
 915	/*
 916	 * If we need to update the superblock, then do it now.
 917	 */
 918	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
 919		xfs_trans_apply_sb_deltas(tp);
 920	xfs_trans_apply_dquot_deltas(tp);
 921
 922	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
 923
 924	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 925	xfs_trans_free(tp);
 926
 927	/*
 928	 * If the transaction needs to be synchronous, then force the
 929	 * log out now and wait for it.
 930	 */
 931	if (sync) {
 932		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 933		XFS_STATS_INC(mp, xs_trans_sync);
 934	} else {
 935		XFS_STATS_INC(mp, xs_trans_async);
 936	}
 937
 938	return error;
 939
 940out_unreserve:
 941	xfs_trans_unreserve_and_mod_sb(tp);
 942
 943	/*
 944	 * It is indeed possible for the transaction not to be dirty while
 945	 * the dqinfo portion is.  All that means is that we have some
 946	 * (non-persistent) quota reservations that need to be unreserved.
 947	 */
 948	xfs_trans_unreserve_and_mod_dquots(tp);
 949	if (tp->t_ticket) {
 950		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
 951		if (commit_lsn == -1 && !error)
 952			error = -EIO;
 953	}
 954	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 955	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
 956	xfs_trans_free(tp);
 957
 958	XFS_STATS_INC(mp, xs_trans_empty);
 959	return error;
 960}
 961
 962int
 963xfs_trans_commit(
 964	struct xfs_trans	*tp)
 965{
 966	return __xfs_trans_commit(tp, false);
 967}
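/*
 * Example usage (a minimal sketch of the common calling pattern; the
 * tr_ichange reservation here is just an illustrative choice):
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	return xfs_trans_commit(tp);
 *
 * Passing XFS_ILOCK_EXCL to xfs_trans_ijoin() hands the inode lock over
 * to the transaction, so the commit (or a cancel) drops it for the caller.
 */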
 968
 969/*
 970 * Unlock all of the transaction's items and free the transaction.
 971 * The transaction must not have modified any of its items, because
 972 * there is no way to restore them to their previous state.
 973 *
 974 * If the transaction has made a log reservation, make sure to release
 975 * it as well.
 976 */
 977void
 978xfs_trans_cancel(
 979	struct xfs_trans	*tp)
 980{
 981	struct xfs_mount	*mp = tp->t_mountp;
 982	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
 983
 984	/*
 985	 * See if the caller is relying on us to shut down the
 986	 * filesystem.  This happens in paths where we detect
 987	 * corruption and decide to give up.
 988	 */
 989	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
 990		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
 991		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 992	}
 993#ifdef DEBUG
 994	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
 995		struct xfs_log_item_desc *lidp;
 996
 997		list_for_each_entry(lidp, &tp->t_items, lid_trans)
 998			ASSERT(lidp->lid_item->li_type != XFS_LI_EFD);
 999	}
1000#endif
1001	xfs_trans_unreserve_and_mod_sb(tp);
1002	xfs_trans_unreserve_and_mod_dquots(tp);
1003
1004	if (tp->t_ticket)
1005		xfs_log_done(mp, tp->t_ticket, NULL, false);
1006
1007	/* mark this thread as no longer being in a transaction */
1008	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1009
1010	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
1011	xfs_trans_free(tp);
1012}
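/*
 * Example (sketch): a caller that hits an error after reserving must
 * cancel instead of committing; do_modification() below is a hypothetical
 * helper standing in for whatever change the caller makes:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	error = do_modification(tp, ip);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	return xfs_trans_commit(tp);
 *
 * As long as nothing was dirtied before the failure the cancel is clean;
 * cancelling a dirty transaction shuts the filesystem down, as above.
 */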
1013
1014/*
1015 * Roll from one trans in the sequence of PERMANENT transactions to
1016 * the next: permanent transactions are only flushed out when
1017 * committed with xfs_trans_commit(), but we still want to let chunks
1018 * of the work reach the log as soon as possible.  So we commit the
1019 * chunk we've been working on and get a new transaction to continue.
1020 */
1021int
1022__xfs_trans_roll(
1023	struct xfs_trans	**tpp,
1024	struct xfs_inode	*dp,
1025	int			*committed)
1026{
1027	struct xfs_trans	*trans;
1028	struct xfs_trans_res	tres;
1029	int			error;
1030
1031	*committed = 0;
1032
1033	/*
1034	 * Ensure that the inode is always logged.
1035	 */
1036	trans = *tpp;
1037	if (dp)
1038		xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
1039
1040	/*
1041	 * Copy the critical parameters from one trans to the next.
1042	 */
1043	tres.tr_logres = trans->t_log_res;
1044	tres.tr_logcount = trans->t_log_count;
1045	*tpp = xfs_trans_dup(trans);
1046
1047	/*
1048	 * Commit the current transaction.
1049	 * If the commit fails, it will just unlock the items joined to
1050	 * it; that also means that a filesystem shutdown is in progress.
1051	 * The caller takes responsibility for cancelling the duplicate
1052	 * transaction that gets returned.
1053	 */
1054	error = __xfs_trans_commit(trans, true);
1055	if (error)
1056		return error;
1057
1058	*committed = 1;
1059	trans = *tpp;
1060
1061	/*
1062	 * Reserve space in the log for the next transaction.
1063	 * This also pushes items in the "AIL", the list of logged items,
1064	 * out to disk if they are taking up space at the tail of the log
1065	 * that we want to use.  This requires that either nothing be locked
1066	 * across this call, or that anything that is locked be logged in
1067	 * the prior and the next transactions.
1068	 */
1069	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1070	error = xfs_trans_reserve(trans, &tres, 0, 0);
1071	/*
1072	 * Ensure that the inode is in the new transaction and locked.
1073	 */
1074	if (error)
1075		return error;
1076
1077	if (dp)
1078		xfs_trans_ijoin(trans, dp, 0);
1079	return 0;
1080}
1081
1082int
1083xfs_trans_roll(
1084	struct xfs_trans	**tpp,
1085	struct xfs_inode	*dp)
1086{
1087	int			committed;
1088	return __xfs_trans_roll(tpp, dp, &committed);
1089}
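/*
 * Example (a sketch modelled on callers such as xfs_itruncate_extents();
 * the do_one_chunk() helper is hypothetical):
 *
 *	while (!done) {
 *		error = do_one_chunk(tp, ip, &done);
 *		if (error)
 *			break;
 *		error = xfs_trans_roll(&tp, ip);
 *		if (error)
 *			break;
 *	}
 *
 * The inode stays locked and is re-joined across each roll, so chunks
 * committed so far can reach the log without exposing an unlocked,
 * half-modified inode.
 */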