/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"

kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;

/*
 * Various log reservation values.
 *
 * These are based on the size of the file system block because that is what
 * most transactions manipulate.  Each adds in an additional 128 bytes per
 * item logged to try to account for the overhead of the transaction mechanism.
 *
 * Note:  Most of the reservations underestimate the number of allocation
 * groups into which they could free extents in the xfs_bmap_finish() call.
 * This is because the number in the worst case is quite high and quite
 * unusual.  In order to fix this we need to change xfs_bmap_finish() to free
 * extents in only a single AG at a time.  This will require changes to the
 * EFI code as well, however, so that the EFI for the extents not freed is
 * logged again in each transaction.  See SGI PV #261917.
 *
 * Reservation functions here avoid a huge stack in xfs_trans_init due to
 * register overflow from temporaries in the calculations.
 */

/*
 * In a write transaction we can allocate a maximum of 2
 * extents.  This gives:
 *    the inode getting the new extents: inode size
 *    the inode's bmap btree: max depth * block size
 *    the agfs of the ags from which the extents are allocated: 2 * sector size
 *    the superblock free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 * And the bmap_finish transaction can free bmap blocks in a join:
 *    the agfs of the ags containing the blocks: 2 * sector size
 *    the agfls of the ags containing the blocks: 2 * sector size
 *    the super block free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_write_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) +
		     2 * mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 2) +
		     128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) +
			    XFS_ALLOCFREE_LOG_COUNT(mp, 2))),
		    (2 * mp->m_sb.sb_sectsize +
		     2 * mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 2) +
		     128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
}
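
/*
 * Worked example (purely illustrative, with assumed geometry, not taken
 * from any particular system): on a filesystem with 4 kB blocks, 512 byte
 * sectors, 256 byte inodes, a data fork bmap btree max depth of 9, and
 * the allocation btree terms evaluating per the comment above to
 * 2 exts * 2 trees * (2 * 5 - 1) blocks, the first MAX() arm works out to
 * roughly:
 *
 *	256			inode
 *	+ 9 * 4096		bmap btree blocks
 *	+ 2 * 512 + 512		agf headers + superblock
 *	+ 2 * 2 * 9 * 4096	allocation btree blocks
 *	+ 128 * (4 + 9 + 36)	per-item overhead
 *	= ~188 kB of log reservation for a single write transaction,
 *
 * which shows how quickly the per-block terms dominate the reservation.
 */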

/*
 * In truncating a file we free up to two extents at once.  We can modify:
 *    the inode being truncated: inode size
 *    the inode's bmap btree: (max depth + 1) * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks:
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *		4 exts * 2 trees * (2 * max depth - 1) * block size
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_itruncate_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) +
		     128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))),
		    (4 * mp->m_sb.sb_sectsize +
		     4 * mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 4) +
		     128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)) +
		     128 * 5 +
		     XFS_ALLOCFREE_LOG_RES(mp, 1) +
		     128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
			    XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}

/*
 * In renaming a file we can modify:
 *    the four inodes involved: 4 * inode size
 *    the two directory btrees: 2 * (max depth + v2) * dir block size
 *    the two directory bmap btrees: 2 * max depth * block size
 * And the bmap_finish transaction can free dir and bmap blocks (two sets
 *	of bmap blocks) giving:
 *    the agf for the ags in which the blocks live: 3 * sector size
 *    the agfl for the ags in which the blocks live: 3 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_rename_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((4 * mp->m_sb.sb_inodesize +
		     2 * XFS_DIROP_LOG_RES(mp) +
		     128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp))),
		    (3 * mp->m_sb.sb_sectsize +
		     3 * mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 3) +
		     128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3))));
}

/*
 * For creating a link to an inode:
 *    the parent directory inode: inode size
 *    the linked inode: inode size
 *    the directory btree could split: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free some bmap blocks giving:
 *    the agf for the ag in which the blocks live: sector size
 *    the agfl for the ag in which the blocks live: sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_link_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     mp->m_sb.sb_inodesize +
		     XFS_DIROP_LOG_RES(mp) +
		     128 * (2 + XFS_DIROP_LOG_COUNT(mp))),
		    (mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 1) +
		     128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}

/*
 * For removing a directory entry we can modify:
 *    the parent directory inode: inode size
 *    the removed inode: inode size
 *    the directory btree could join: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free the dir and bmap blocks giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_remove_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     mp->m_sb.sb_inodesize +
		     XFS_DIROP_LOG_RES(mp) +
		     128 * (2 + XFS_DIROP_LOG_COUNT(mp))),
		    (2 * mp->m_sb.sb_sectsize +
		     2 * mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 2) +
		     128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
}

/*
 * For symlink we can modify:
 *    the parent directory inode: inode size
 *    the new inode: inode size
 *    the inode btree entry: 1 block
 *    the directory btree: (max depth + v2) * dir block size
 *    the directory inode's bmap btree: (max depth + v2) * block size
 *    the blocks for the symlink: 1 kB
 * Or in the first xact we allocate some inodes giving:
 *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
 *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_symlink_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     mp->m_sb.sb_inodesize +
		     XFS_FSB_TO_B(mp, 1) +
		     XFS_DIROP_LOG_RES(mp) +
		     1024 +
		     128 * (4 + XFS_DIROP_LOG_COUNT(mp))),
		    (2 * mp->m_sb.sb_sectsize +
		     XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) +
		     XFS_FSB_TO_B(mp, mp->m_in_maxlevels) +
		     XFS_ALLOCFREE_LOG_RES(mp, 1) +
		     128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
			    XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}

/*
 * For create we can modify:
 *    the parent directory inode: inode size
 *    the new inode: inode size
 *    the inode btree entry: block size
 *    the superblock for the nlink flag: sector size
 *    the directory btree: (max depth + v2) * dir block size
 *    the directory inode's bmap btree: (max depth + v2) * block size
 * Or in the first xact we allocate some inodes giving:
 *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
 *    the superblock for the nlink flag: sector size
 *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_create_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     mp->m_sb.sb_inodesize +
		     mp->m_sb.sb_sectsize +
		     XFS_FSB_TO_B(mp, 1) +
		     XFS_DIROP_LOG_RES(mp) +
		     128 * (3 + XFS_DIROP_LOG_COUNT(mp))),
		    (3 * mp->m_sb.sb_sectsize +
		     XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) +
		     XFS_FSB_TO_B(mp, mp->m_in_maxlevels) +
		     XFS_ALLOCFREE_LOG_RES(mp, 1) +
		     128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
			    XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}

/*
 * Making a new directory is the same as creating a new file.
 */
STATIC uint
xfs_calc_mkdir_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_create_reservation(mp);
}

/*
 * In freeing an inode we can modify:
 *    the inode being freed: inode size
 *    the super block free inode counter: sector size
 *    the agi hash list and counters: sector size
 *    the inode btree entry: block size
 *    the on disk inode before ours in the agi hash list: inode cluster size
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_ifree_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		mp->m_sb.sb_inodesize +
		mp->m_sb.sb_sectsize +
		mp->m_sb.sb_sectsize +
		XFS_FSB_TO_B(mp, 1) +
		MAX((__uint16_t)XFS_FSB_TO_B(mp, 1),
		    XFS_INODE_CLUSTER_SIZE(mp)) +
		128 * 5 +
		XFS_ALLOCFREE_LOG_RES(mp, 1) +
		128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
		       XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}

/*
 * When only changing the inode we log the inode and possibly the superblock.
 * We also add a bit of slop for the transaction stuff.
 */
STATIC uint
xfs_calc_ichange_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		mp->m_sb.sb_inodesize +
		mp->m_sb.sb_sectsize +
		512;
}

/*
 * Growing the data section of the filesystem.
 *	superblock
 *	agi and agf
 *	allocation btrees
 */
STATIC uint
xfs_calc_growdata_reservation(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_sectsize * 3 +
		XFS_ALLOCFREE_LOG_RES(mp, 1) +
		128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the first set of transactions (ALLOC) we allocate space to the
 * bitmap or summary files.
 *	superblock: sector size
 *	agf of the ag from which the extent is allocated: sector size
 *	bmap btree for bitmap/summary inode: max depth * blocksize
 *	bitmap/summary inode: inode size
 *	allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
 */
STATIC uint
xfs_calc_growrtalloc_reservation(
	struct xfs_mount	*mp)
{
	return 2 * mp->m_sb.sb_sectsize +
		XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) +
		mp->m_sb.sb_inodesize +
		XFS_ALLOCFREE_LOG_RES(mp, 1) +
		128 * (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) +
		       XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the second set of transactions (ZERO) we zero the new metadata blocks.
 *	one bitmap/summary block: blocksize
 */
STATIC uint
xfs_calc_growrtzero_reservation(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_blocksize + 128;
}

/*
 * Growing the rt section of the filesystem.
 * In the third set of transactions (FREE) we update metadata without
 * allocating any new blocks.
 *	superblock: sector size
 *	bitmap inode: inode size
 *	summary inode: inode size
 *	one bitmap block: blocksize
 *	summary blocks: new summary size
 */
STATIC uint
xfs_calc_growrtfree_reservation(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_sectsize +
		2 * mp->m_sb.sb_inodesize +
		mp->m_sb.sb_blocksize +
		mp->m_rsumsize +
		128 * 5;
}

/*
 * Logging the inode modification timestamp on a synchronous write.
 *	inode
 */
STATIC uint
xfs_calc_swrite_reservation(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_inodesize + 128;
}

/*
 * Logging the inode mode bits when writing a setuid/setgid file
 *	inode
 */
STATIC uint
xfs_calc_writeid_reservation(xfs_mount_t *mp)
{
	return mp->m_sb.sb_inodesize + 128;
}

/*
 * Converting the inode from non-attributed to attributed.
 *	the inode being converted: inode size
 *	agf block and superblock (for block allocation)
 *	the new block (directory sized)
 *	bmap blocks for the new directory block
 *	allocation btrees
 */
STATIC uint
xfs_calc_addafork_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		mp->m_sb.sb_inodesize +
		mp->m_sb.sb_sectsize * 2 +
		mp->m_dirblksize +
		XFS_FSB_TO_B(mp, XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) +
		XFS_ALLOCFREE_LOG_RES(mp, 1) +
		128 * (4 + XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1 +
		       XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}

/*
 * Removing the attribute fork of a file
 *    the inode being truncated: inode size
 *    the inode's bmap btree: max depth * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks:
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *		4 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrinval_reservation(
	struct xfs_mount	*mp)
{
	return MAX((mp->m_sb.sb_inodesize +
		    XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
		    128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))),
		   (4 * mp->m_sb.sb_sectsize +
		    4 * mp->m_sb.sb_sectsize +
		    mp->m_sb.sb_sectsize +
		    XFS_ALLOCFREE_LOG_RES(mp, 4) +
		    128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))));
}

/*
 * Setting an attribute.
 *	the inode getting the attribute
 *	the superblock for allocations
 *	the agfs extents are allocated from
 *	the attribute btree * max depth
 *	the inode allocation btree
 * Since attribute transaction space is dependent on the size of the attribute,
 * the calculation is done partially at mount time and partially at runtime.
 */
STATIC uint
xfs_calc_attrset_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		mp->m_sb.sb_inodesize +
		mp->m_sb.sb_sectsize +
		XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) +
		128 * (2 + XFS_DA_NODE_MAXDEPTH);
}

/*
 * Removing an attribute.
 *    the inode: inode size
 *    the attribute btree could join: max depth * block size
 *    the inode bmap btree could join or split: max depth * block size
 * And the bmap_finish transaction can free the attr blocks freed giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrrm_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES(mp) +
		MAX((mp->m_sb.sb_inodesize +
		     XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) +
		     XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
		     128 * (1 + XFS_DA_NODE_MAXDEPTH +
			    XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))),
		    (2 * mp->m_sb.sb_sectsize +
		     2 * mp->m_sb.sb_sectsize +
		     mp->m_sb.sb_sectsize +
		     XFS_ALLOCFREE_LOG_RES(mp, 2) +
		     128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
}

/*
 * Clearing a bad agino number in an agi hash bucket.
 */
STATIC uint
xfs_calc_clear_agi_bucket_reservation(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_sectsize + 128;
}

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	struct xfs_trans_reservations *resp = &mp->m_reservations;

	resp->tr_write = xfs_calc_write_reservation(mp);
	resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
	resp->tr_rename = xfs_calc_rename_reservation(mp);
	resp->tr_link = xfs_calc_link_reservation(mp);
	resp->tr_remove = xfs_calc_remove_reservation(mp);
	resp->tr_symlink = xfs_calc_symlink_reservation(mp);
	resp->tr_create = xfs_calc_create_reservation(mp);
	resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
	resp->tr_ifree = xfs_calc_ifree_reservation(mp);
	resp->tr_ichange = xfs_calc_ichange_reservation(mp);
	resp->tr_growdata = xfs_calc_growdata_reservation(mp);
	resp->tr_swrite = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid = xfs_calc_writeid_reservation(mp);
	resp->tr_addafork = xfs_calc_addafork_reservation(mp);
	resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrset = xfs_calc_attrset_reservation(mp);
	resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
	resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
}
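
/*
 * Illustrative note (a sketch, not part of this file): callers normally
 * reach these precomputed values through the XFS_*_LOG_RES() macros in
 * xfs_trans.h, which in this era simply index the table filled in above,
 * e.g. something like
 *
 *	#define XFS_WRITE_LOG_RES(mp)	((mp)->m_reservations.tr_write)
 *
 * so the expensive MAX()/multiply chains above run once at mount time
 * rather than on every transaction reservation.
 */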

/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
	return _xfs_trans_alloc(mp, type, KM_SLEEP);
}

xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type,
	uint		memflags)
{
	xfs_trans_t	*tp;

	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	return tp;
}
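
/*
 * Typical caller pattern (an illustrative sketch assuming a transaction
 * type and reservation macro from xfs_trans.h; error handling trimmed):
 *
 *	xfs_trans_t	*tp;
 *	int		error;
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
 *	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
 *				  0, 0, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	...modify and log items...
 *	error = xfs_trans_commit(tp, 0);
 *
 * A failed reservation must still be cancelled so that m_active_trans,
 * incremented in _xfs_trans_alloc(), is balanced again.
 */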

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_alloc_busy_sort(&tp->t_busy);
	xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
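
/*
 * Usage sketch (illustrative): xfs_trans_dup() is the core of the
 * "rolling transaction" pattern used by long-running operations such as
 * truncate.  The caller duplicates the transaction, commits the old one
 * while keeping the permanent log reservation, and re-reserves for the
 * next leg, roughly:
 *
 *	ntp = xfs_trans_dup(tp);
 *	error = xfs_trans_commit(tp, 0);
 *	tp = ntp;
 *	error = xfs_trans_reserve(tp, 0, log_res, 0,
 *				  XFS_TRANS_PERM_LOG_RES, log_count);
 *
 * Compare xfs_trans_roll(), which wraps essentially this sequence.
 */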

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	xfs_trans_t	*tp,
	uint		blocks,
	uint		logspace,
	uint		rtextents,
	uint		flags,
	uint		logcount)
{
	int		log_flags;
	int		error = 0;
	int		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
					  -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return (XFS_ERROR(ENOSPC));
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (logspace > 0) {
		ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace));
		ASSERT((tp->t_log_count == 0) ||
			(tp->t_log_count == logcount));
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_PERM_RESERV;
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
			log_flags = 0;
		}

		error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
					&tp->t_ticket,
					XFS_TRANSACTION, log_flags, tp->t_type);
		if (error) {
			goto undo_blocks;
		}
		tp->t_log_res = logspace;
		tp->t_log_count = logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
					  -((int64_t)rtextents), rsvd);
		if (error) {
			error = XFS_ERROR(ENOSPC);
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (logspace > 0) {
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
					 (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
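
/*
 * Illustrative sketch (assumed macros from xfs_trans.h, not defined in
 * this file): a permanent reservation for a multi-leg operation such as
 * truncate would look roughly like
 *
 *	error = xfs_trans_reserve(tp, blocks,
 *				  XFS_ITRUNCATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES,
 *				  XFS_ITRUNCATE_LOG_COUNT);
 *
 * where the log count tells the log how many times the reservation may
 * be re-used via xfs_trans_dup()/xfs_trans_reserve() before new log
 * space must be granted.
 */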
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
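
/*
 * Caller sketch (illustrative): an allocation path that takes len
 * blocks out of the free pool charges them against the transaction's
 * block reservation with
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * and a free path does the same with a positive delta.  The deltas are
 * only folded into the on-disk and incore superblocks at commit time,
 * by xfs_trans_apply_sb_deltas() and xfs_trans_unreserve_and_mod_sb()
 * below.
 */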

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
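
/*
 * Note on the partial logging above (illustrative): because sb_icount,
 * sb_ifree, sb_fdblocks and sb_frextents sit next to each other in
 * struct xfs_dsb, the common case can log just that one contiguous
 * byte range, e.g.
 *
 *	first = offsetof(xfs_dsb_t, sb_icount);
 *	last  = offsetof(xfs_dsb_t, sb_frextents) +
 *		sizeof(sbp->sb_frextents) - 1;
 *	xfs_trans_log_buf(tp, bp, first, last);
 *
 * Any change to a geometry field (the "whole" cases) forces logging of
 * the entire superblock instead, since those fields are scattered.
 */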
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because those have already been
 * applied.
 *
 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 * However, we have to ensure that we modify each superblock field only once,
 * because the application of the delta values may not be atomic.  Otherwise
 * ENOSPC races can occur if we make two separate modifications of the free
 * space counter: one to put back the entire reservation and another to take
 * away what we used.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	xfs_trans_t	*tp)
{
	xfs_mod_sb_t	msb[9];	/* If you add cases, add entries */
	xfs_mod_sb_t	*msbp;
	xfs_mount_t	*mp = tp->t_mountp;
	/* REFERENCED */
	int		error;
	int		rsvd;
	int64_t		blkdelta = 0;
	int64_t		rtxdelta = 0;
	int64_t		idelta = 0;
	int64_t		ifreedelta = 0;

	msbp = msb;
	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
						 idelta, rsvd);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
						 ifreedelta, rsvd);
		if (error)
			goto out_undo_icount;
	}

	/* apply remaining deltas */
	if (rtxdelta != 0) {
		msbp->msb_field = XFS_SBS_FREXTENTS;
		msbp->msb_delta = rtxdelta;
		msbp++;
	}

	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = tp->t_rextslog_delta;
			msbp++;
		}
	}

	/*
	 * If we need to change anything, do it.
	 */
	if (msbp > msb) {
		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
			(uint)(msbp - msb), rsvd);
		if (error)
			goto out_undo_ifreecount;
	}

	return;

out_undo_ifreecount:
	if (ifreedelta)
		xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
out_undo_icount:
	if (idelta)
		xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
out_undo_fdblocks:
	if (blkdelta)
		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	lidp->lid_size = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
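
/*
 * Usage sketch (illustrative): item-type code pairs these helpers; e.g.
 * joining an inode to a transaction attaches its log item with roughly
 *
 *	xfs_trans_add_item(tp, &ip->i_itemp->ili_item);
 *
 * and a clean, cancelled item is detached again with xfs_trans_del_item()
 * below.  The descriptor is the per-transaction bookkeeping (dirty flag,
 * vector count) for the item.
 */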

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}

/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	int			flags)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			IOP_COMMITTING(lip, commit_lsn);
		if (flags & XFS_TRANS_ABORT)
			lip->li_flags |= XFS_LI_ABORTED;
		IOP_UNLOCK(lip);

		xfs_trans_free_item_desc(lidp);
	}
}

/*
 * Unlock the items associated with a transaction.
 *
 * Items which were not logged should be freed.  Those which were logged must
 * still be tracked so they can be unpinned when the transaction commits.
 */
STATIC void
xfs_trans_unlock_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			IOP_COMMITTING(lip, commit_lsn);
		IOP_UNLOCK(lip);

		/*
		 * Free the descriptor if the item is not dirty
		 * within this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			xfs_trans_free_item_desc(lidp);
	}
}

/*
 * Total up the number of log iovecs needed to commit this
 * transaction.  The transaction itself needs one for the
 * transaction header.  Ask each dirty item in turn how many
 * it needs to get the total.
 */
static uint
xfs_trans_count_vecs(
	struct xfs_trans	*tp)
{
	int			nvecs;
	struct xfs_log_item_desc *lidp;

	nvecs = 1;

	/*
	 * In the non-debug case we need to bail out here: if we didn't
	 * find a log item, return zero and let trans_commit deal with it.
	 */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return 0;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;
		lidp->lid_size = IOP_SIZE(lidp->lid_item);
		nvecs += lidp->lid_size;
	}

	return nvecs;
}

/*
 * Fill in the vector with pointers to data to be logged
 * by this transaction.  The transaction header takes
 * the first vector, and then each dirty item takes the
 * number of vectors it indicated it needed in xfs_trans_count_vecs().
 *
 * As each item fills in the entries it needs, also pin the item
 * so that it cannot be flushed out until the log write completes.
 */
static void
xfs_trans_fill_vecs(
	struct xfs_trans	*tp,
	struct xfs_log_iovec	*log_vector)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_log_iovec	*vecp;
	uint			nitems;

	/*
	 * Skip over the entry for the transaction header, we'll
	 * fill that in at the end.
	 */
	vecp = log_vector + 1;

	nitems = 0;
	ASSERT(!list_empty(&tp->t_items));
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/*
		 * An item may be marked dirty yet log nothing.  This can
		 * be used by an item to get a callback when the
		 * transaction commits.
		 */
		if (lidp->lid_size)
			nitems++;
		IOP_FORMAT(lidp->lid_item, vecp);
		vecp += lidp->lid_size;
		IOP_PIN(lidp->lid_item);
	}

	/*
	 * Now that we've counted the number of items in this transaction, fill
	 * in the transaction header. Note that the transaction header does not
	 * have a log item.
	 */
	tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_header.th_type = tp->t_type;
	tp->t_header.th_num_items = nitems;
	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
	log_vector->i_len = sizeof(xfs_trans_header_t);
	log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
}

/*
 * The committed item processing consists of calling the committed routine of
 * each logged item, updating the item's position in the AIL if necessary, and
 * unpinning each item.  If the committed routine returns -1, then do nothing
 * further with the item because it may have been freed.
 *
 * Since items are unlocked when they are copied to the incore log, it is
 * possible for two transactions to be completing and manipulating the same
 * item simultaneously.  The AIL lock will protect the lsn field of each item.
 * The value of this field can never go backwards.
 *
 * We unpin the items after repositioning them in the AIL, because otherwise
 * they could be immediately flushed and we'd have to race with the flusher
 * trying to pull the item from the AIL as we add it.
 */
static void
xfs_trans_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
	xfs_lsn_t		item_lsn;
	struct xfs_ail		*ailp;

	if (aborted)
		lip->li_flags |= XFS_LI_ABORTED;
	item_lsn = IOP_COMMITTED(lip, commit_lsn);

	/* item_lsn of -1 means the item needs no further processing */
	if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
		return;

	/*
	 * If the returned lsn is greater than what it contained before, update
	 * the location of the item in the AIL.  If it is not, then do nothing.
	 * Items can never move backwards in the AIL.
	 *
	 * While the new lsn should usually be greater, it is possible that a
	 * later transaction completing simultaneously with an earlier one
	 * using the same item could complete first with a higher lsn.  This
	 * would cause the earlier transaction to fail the test below.
	 */
	ailp = lip->li_ailp;
	spin_lock(&ailp->xa_lock);
	if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
		/*
		 * This will set the item's lsn to item_lsn and update the
		 * position of the item in the AIL.
		 *
		 * xfs_trans_ail_update() drops the AIL lock.
		 */
		xfs_trans_ail_update(ailp, lip, item_lsn);
	} else {
		spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Now that we've repositioned the item in the AIL, unpin it so it can
	 * be flushed. Pass information about buffer stale state down from the
	 * log item flags, if anyone else stales the buffer we do not want to
	 * pay any attention to it.
	 */
	IOP_UNPIN(lip, 0);
}

/*
 * This is typically called by the LM when a transaction has been fully
 * committed to disk.  It needs to unpin the items which have
 * been logged by the transaction and update their positions
 * in the AIL if necessary.
 *
 * This also gets called when the transactions didn't get written out
 * because of an I/O error; in that case abortflag & XFS_LI_ABORTED is set.
 */
STATIC void
xfs_trans_committed(
	void			*arg,
	int			abortflag)
{
	struct xfs_trans	*tp = arg;
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
		xfs_trans_free_item_desc(lidp);
	}

	xfs_trans_free(tp);
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++)
		IOP_UNPIN(log_items[i], 0);
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through IOP_COMMITTED and IOP_UNLOCK, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is IOP_COMMITTED processing, followed by an
 * IOP_UNPIN(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = IOP_COMMITTED(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			IOP_UNPIN(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			IOP_UNPIN(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
}

/*
 * Called from the trans_commit code when we notice that the filesystem is in
 * the middle of a forced shutdown.
 *
 * When we are called here, we have already pinned all the items in the
 * transaction. However, neither IOP_COMMITTING nor IOP_UNLOCK has been called,
 * so we can simply walk the items in the transaction, unpin them with an abort
 * flag and then free the items. Note that unpinning the items can result in
 * them being freed immediately, so we need to use a safe list traversal method
 * here.
 */
STATIC void
xfs_trans_uncommit(
	struct xfs_trans	*tp,
	uint			flags)
{
	struct xfs_log_item_desc *lidp, *n;

	list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) {
		if (lidp->lid_flags & XFS_LID_DIRTY)
			IOP_UNPIN(lidp->lid_item, 1);
	}

	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
	xfs_trans_free(tp);
}
1569
1570/*
1571 * Format the transaction direct to the iclog. This isolates the physical
1572 * transaction commit operation from the logical operation and hence allows
1573 * other methods to be introduced without affecting the existing commit path.
1574 */
1575static int
1576xfs_trans_commit_iclog(
1577	struct xfs_mount	*mp,
1578	struct xfs_trans	*tp,
1579	xfs_lsn_t		*commit_lsn,
1580	int			flags)
1581{
1582	int			shutdown;
1583	int			error;
1584	int			log_flags = 0;
1585	struct xlog_in_core	*commit_iclog;
1586#define XFS_TRANS_LOGVEC_COUNT  16
1587	struct xfs_log_iovec	log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
1588	struct xfs_log_iovec	*log_vector;
1589	uint			nvec;
1590
1591
1592	/*
1593	 * Ask each log item how many log_vector entries it will
1594	 * need so we can figure out how many to allocate.
1595	 * Try to avoid the kmem_alloc() call in the common case
1596	 * by using a vector from the stack when it fits.
1597	 */
1598	nvec = xfs_trans_count_vecs(tp);
1599	if (nvec == 0) {
1600		return ENOMEM;	/* triggers a shutdown! */
1601	} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
1602		log_vector = log_vector_fast;
1603	} else {
1604		log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
1605						   sizeof(xfs_log_iovec_t),
1606						   KM_SLEEP);
1607	}
1608
1609	/*
1610	 * Fill in the log_vector and pin the logged items, and
1611	 * then write the transaction to the log.
1612	 */
1613	xfs_trans_fill_vecs(tp, log_vector);
1614
1615	if (flags & XFS_TRANS_RELEASE_LOG_RES)
1616		log_flags = XFS_LOG_REL_PERM_RESERV;
1617
1618	error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));
1619
1620	/*
1621	 * The transaction is committed incore here, and can go out to disk
1622	 * at any time after this call.  However, all the items associated
1623	 * with the transaction are still locked and pinned in memory.
1624	 */
1625	*commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
1626
1627	tp->t_commit_lsn = *commit_lsn;
1628	trace_xfs_trans_commit_lsn(tp);
1629
1630	if (nvec > XFS_TRANS_LOGVEC_COUNT)
1631		kmem_free(log_vector);
1632
1633	/*
1634	 * If we got a log write error. Unpin the logitems that we
1635	 * had pinned, clean up, free trans structure, and return error.
1636	 */
1637	if (error || *commit_lsn == -1) {
1638		current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1639		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
1640		return XFS_ERROR(EIO);
1641	}
1642
1643	/*
1644	 * Once the transaction has committed, unused
1645	 * reservations need to be released and changes to
1646	 * the superblock need to be reflected in the in-core
1647	 * version.  Do that now.
1648	 */
1649	xfs_trans_unreserve_and_mod_sb(tp);
1650
1651	/*
1652	 * Tell the LM to call the transaction completion routine
1653	 * when the log write with LSN commit_lsn completes (e.g.
1654	 * when the transaction commit really hits the on-disk log).
1655	 * After this call we cannot reference tp, because the call
1656	 * can happen at any time and the call will free the transaction
1657	 * structure pointed to by tp.  The only case where we call
1658	 * the completion routine (xfs_trans_committed) directly is
1659	 * if the log is turned off on a debug kernel or we're
1660	 * running in simulation mode (the log is explicitly turned
1661	 * off).
1662	 */
1663	tp->t_logcb.cb_func = xfs_trans_committed;
1664	tp->t_logcb.cb_arg = tp;
1665
1666	/*
1667	 * We need to pass the iclog buffer which was used for the
1668	 * transaction commit record into this function, and attach
1669	 * the callback to it. The callback must be attached before
1670	 * the items are unlocked to avoid racing with other threads
1671	 * waiting for an item to unlock.
1672	 */
1673	shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));
1674
1675	/*
1676	 * Mark this thread as no longer being in a transaction
1677	 */
1678	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1679
1680	/*
1681	 * Once all the items of the transaction have been copied
1682	 * to the in core log and the callback is attached, the
1683	 * items can be unlocked.
1684	 *
1685	 * This will free descriptors pointing to items which were
1686	 * not logged since there is nothing more to do with them.
1687	 * For items which were logged, we will keep pointers to them
1688	 * so they can be unpinned after the transaction commits to disk.
1689	 * This will also stamp each modified meta-data item with
1690	 * the commit lsn of this transaction for dependency tracking
1691	 * purposes.
1692	 */
1693	xfs_trans_unlock_items(tp, *commit_lsn);
1694
1695	/*
1696	 * If we detected a log error earlier, finish committing
1697	 * the transaction now (unpin log items, etc).
1698	 *
1699	 * Order is critical here, to avoid using the transaction
1700	 * pointer after it's been freed (by xfs_trans_committed
1701	 * either here now, or as a callback).  We cannot do this
1702	 * step inside xfs_log_notify as was done earlier because
1703	 * of this issue.
1704	 */
1705	if (shutdown)
1706		xfs_trans_committed(tp, XFS_LI_ABORTED);
1707
1708	/*
1709	 * Now that the xfs_trans_committed callback has been attached,
1710	 * and the items are released we can finally allow the iclog to
1711	 * go to disk.
1712	 */
1713	return xfs_log_release_iclog(mp, commit_iclog);
1714}
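/*
 * Illustrative aside, not part of the kernel source: the log_vector_fast
 * handling above is the classic "small on-stack buffer with heap fallback"
 * pattern.  A minimal userspace sketch of the same idea, with invented
 * names, might look like this:
 */
#include <stdio.h>
#include <stdlib.h>

#define FAST_COUNT	16

static int run(size_t nvec)
{
	int	fast[FAST_COUNT];
	int	*vec;
	size_t	i;

	/* Use the stack buffer when it is big enough, else fall back to malloc. */
	if (nvec <= FAST_COUNT) {
		vec = fast;
	} else {
		vec = malloc(nvec * sizeof(*vec));
		if (!vec)
			return -1;
	}

	for (i = 0; i < nvec; i++)
		vec[i] = (int)i;
	printf("filled %zu vectors\n", nvec);

	/* Only free what we actually allocated. */
	if (nvec > FAST_COUNT)
		free(vec);
	return 0;
}

int main(void)
{
	return run(4) || run(32);
}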
1715
1716/*
1717 * Walk the log items and allocate log vector structures for
1718 * each item large enough to fit all the vectors they require.
1719 * Note that this format differs from the old log vector format in
1720 * that there is no transaction header in these log vectors.
1721 */
1722STATIC struct xfs_log_vec *
1723xfs_trans_alloc_log_vecs(
1724	xfs_trans_t	*tp)
1725{
1726	struct xfs_log_item_desc *lidp;
1727	struct xfs_log_vec	*lv = NULL;
1728	struct xfs_log_vec	*ret_lv = NULL;
1729
1730
1731	/* Bail out if we didn't find a log item.  */
1732	if (list_empty(&tp->t_items)) {
1733		ASSERT(0);
1734		return NULL;
1735	}
1736
1737	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
1738		struct xfs_log_vec *new_lv;
1739
1740		/* Skip items which aren't dirty in this transaction. */
1741		if (!(lidp->lid_flags & XFS_LID_DIRTY))
1742			continue;
1743
1744		/* Skip items that do not have any vectors for writing */
1745		lidp->lid_size = IOP_SIZE(lidp->lid_item);
1746		if (!lidp->lid_size)
1747			continue;
1748
1749		new_lv = kmem_zalloc(sizeof(*new_lv) +
1750				lidp->lid_size * sizeof(struct xfs_log_iovec),
1751				KM_SLEEP);
1752
1753		/* The allocated iovec region lies beyond the log vector. */
1754		new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
1755		new_lv->lv_niovecs = lidp->lid_size;
1756		new_lv->lv_item = lidp->lid_item;
1757		if (!ret_lv)
1758			ret_lv = new_lv;
1759		else
1760			lv->lv_next = new_lv;
1761		lv = new_lv;
1762	}
1763
1764	return ret_lv;
1765}
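/*
 * Illustrative aside, not kernel code: xfs_trans_alloc_log_vecs() puts the
 * iovec array in the same allocation as its owning header, starting just
 * past it ("&new_lv[1]").  A minimal userspace sketch of that layout, with
 * invented types:
 */
#include <stdio.h>
#include <stdlib.h>

struct vec_hdr {
	struct vec_hdr	*next;
	int		nvecs;
	int		*vecs;		/* points just beyond this header */
};

static struct vec_hdr *vec_hdr_alloc(int nvecs)
{
	/* One allocation covers the header and its trailing array. */
	struct vec_hdr	*h = calloc(1, sizeof(*h) + nvecs * sizeof(int));

	if (!h)
		return NULL;
	h->vecs = (int *)&h[1];
	h->nvecs = nvecs;
	return h;
}

int main(void)
{
	struct vec_hdr	*h = vec_hdr_alloc(4);

	if (!h)
		return 1;
	h->vecs[0] = 42;		/* lives inside the same allocation */
	printf("%d\n", h->vecs[0]);
	free(h);			/* frees header and array together */
	return 0;
}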
1766
1767static int
1768xfs_trans_commit_cil(
1769	struct xfs_mount	*mp,
1770	struct xfs_trans	*tp,
1771	xfs_lsn_t		*commit_lsn,
1772	int			flags)
1773{
1774	struct xfs_log_vec	*log_vector;
1775
1776	/*
1777	 * Get each log item to allocate a vector structure for
1778	 * the log item to pass to the log write code. The
1779	 * CIL commit code will format the vector and save it away.
1780	 */
1781	log_vector = xfs_trans_alloc_log_vecs(tp);
1782	if (!log_vector)
1783		return ENOMEM;
1784
1785	xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
1786
1787	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1788	xfs_trans_free(tp);
1789	return 0;
1790}
1791
1792/*
1793 * xfs_trans_commit
1794 *
1795 * Commit the given transaction to the log a/synchronously.
1796 *
1797 * The XFS disk error handling mechanism is not based on a typical
1798 * transaction abort mechanism. Logically, after the filesystem
1799 * gets marked 'SHUTDOWN', we can't let any new transactions
1800 * be durable - i.e. committed to disk - because some metadata might
1801 * be inconsistent. In such cases, this returns an error, and the
1802 * caller may assume that all locked objects joined to the transaction
1803 * have already been unlocked as if the commit had succeeded.
1804 * Do not reference the transaction structure after this call.
1805 */
1806int
1807_xfs_trans_commit(
1808	struct xfs_trans	*tp,
1809	uint			flags,
1810	int			*log_flushed)
1811{
1812	struct xfs_mount	*mp = tp->t_mountp;
1813	xfs_lsn_t		commit_lsn = -1;
1814	int			error = 0;
1815	int			log_flags = 0;
1816	int			sync = tp->t_flags & XFS_TRANS_SYNC;
1817
1818	/*
1819	 * Determine whether this commit is releasing a permanent
1820	 * log reservation or not.
1821	 */
1822	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
1823		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1824		log_flags = XFS_LOG_REL_PERM_RESERV;
1825	}
1826
1827	/*
1828	 * If there is nothing to be logged by the transaction,
1829	 * then unlock all of the items associated with the
1830	 * transaction and free the transaction structure.
1831	 * Also make sure to return any reserved blocks to
1832	 * the free pool.
1833	 */
1834	if (!(tp->t_flags & XFS_TRANS_DIRTY))
1835		goto out_unreserve;
1836
1837	if (XFS_FORCED_SHUTDOWN(mp)) {
1838		error = XFS_ERROR(EIO);
1839		goto out_unreserve;
1840	}
1841
1842	ASSERT(tp->t_ticket != NULL);
1843
1844	/*
1845	 * If we need to update the superblock, then do it now.
1846	 */
1847	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
1848		xfs_trans_apply_sb_deltas(tp);
1849	xfs_trans_apply_dquot_deltas(tp);
1850
1851	if (mp->m_flags & XFS_MOUNT_DELAYLOG)
1852		error = xfs_trans_commit_cil(mp, tp, &commit_lsn, flags);
1853	else
1854		error = xfs_trans_commit_iclog(mp, tp, &commit_lsn, flags);
1855
1856	if (error == ENOMEM) {
1857		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
1858		error = XFS_ERROR(EIO);
1859		goto out_unreserve;
1860	}
1861
1862	/*
1863	 * If the transaction needs to be synchronous, then force the
1864	 * log out now and wait for it.
1865	 */
1866	if (sync) {
1867		if (!error) {
1868			error = _xfs_log_force_lsn(mp, commit_lsn,
1869				      XFS_LOG_SYNC, log_flushed);
1870		}
1871		XFS_STATS_INC(xs_trans_sync);
1872	} else {
1873		XFS_STATS_INC(xs_trans_async);
1874	}
1875
1876	return error;
1877
1878out_unreserve:
1879	xfs_trans_unreserve_and_mod_sb(tp);
1880
1881	/*
1882	 * It is indeed possible for the transaction to be not dirty but
1883	 * the dqinfo portion to be.  All that means is that we have some
1884	 * (non-persistent) quota reservations that need to be unreserved.
1885	 */
1886	xfs_trans_unreserve_and_mod_dquots(tp);
1887	if (tp->t_ticket) {
1888		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
1889		if (commit_lsn == -1 && !error)
1890			error = XFS_ERROR(EIO);
1891	}
1892	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1893	xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0);
1894	xfs_trans_free(tp);
1895
1896	XFS_STATS_INC(xs_trans_empty);
1897	return error;
1898}
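/*
 * Illustrative aside (hypothetical caller, not part of this file): a caller
 * that needs the commit on stable storage before returning sets
 * XFS_TRANS_SYNC first, which makes the sync branch above force the log out
 * to commit_lsn and wait.  This assumes the xfs_trans_set_sync() helper
 * declared in xfs_trans.h.
 */
	xfs_trans_set_sync(tp);			/* sets XFS_TRANS_SYNC in t_flags */
	error = xfs_trans_commit(tp, 0);	/* sync commit via the path above */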
1899
1900/*
1901 * Unlock all of the transaction's items and free the transaction.
1902 * The transaction must not have modified any of its items, because
1903 * there is no way to restore them to their previous state.
1904 *
1905 * If the transaction has made a log reservation, make sure to release
1906 * it as well.
1907 */
1908void
1909xfs_trans_cancel(
1910	xfs_trans_t		*tp,
1911	int			flags)
1912{
1913	int			log_flags;
1914	xfs_mount_t		*mp = tp->t_mountp;
1915
1916	/*
1917	 * See if the caller is being too lazy to figure out if
1918	 * the transaction really needs an abort.
1919	 */
1920	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
1921		flags &= ~XFS_TRANS_ABORT;
1922	/*
1923	 * See if the caller is relying on us to shut down the
1924	 * filesystem.  This happens in paths where we detect
1925	 * corruption and decide to give up.
1926	 */
1927	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
1928		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1929		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1930	}
1931#ifdef DEBUG
1932	if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
1933		struct xfs_log_item_desc *lidp;
1934
1935		list_for_each_entry(lidp, &tp->t_items, lid_trans)
1936			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
1937	}
1938#endif
1939	xfs_trans_unreserve_and_mod_sb(tp);
1940	xfs_trans_unreserve_and_mod_dquots(tp);
1941
1942	if (tp->t_ticket) {
1943		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
1944			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1945			log_flags = XFS_LOG_REL_PERM_RESERV;
1946		} else {
1947			log_flags = 0;
1948		}
1949		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
1950	}
1951
1952	/* mark this thread as no longer being in a transaction */
1953	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1954
1955	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
1956	xfs_trans_free(tp);
1957}
1958
1959/*
1960 * Roll from one trans in the sequence of PERMANENT transactions to
1961 * the next: permanent transactions are only flushed out when
1962 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want to let
1963 * chunks of it go to the log as soon as possible. So we commit the
1964 * chunk we've been working on and get a new transaction to continue.
1965 */
1966int
1967xfs_trans_roll(
1968	struct xfs_trans	**tpp,
1969	struct xfs_inode	*dp)
1970{
1971	struct xfs_trans	*trans;
1972	unsigned int		logres, count;
1973	int			error;
1974
1975	/*
1976	 * Ensure that the inode is always logged.
1977	 */
1978	trans = *tpp;
1979	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
1980
1981	/*
1982	 * Copy the critical parameters from one trans to the next.
1983	 */
1984	logres = trans->t_log_res;
1985	count = trans->t_log_count;
1986	*tpp = xfs_trans_dup(trans);
1987
1988	/*
1989	 * Commit the current transaction.
1990	 * If this commit failed, then it'd just unlock those items that
1991	 * are not marked ihold. That also means that a filesystem shutdown
1992	 * is in progress. The caller takes the responsibility to cancel
1993	 * the duplicate transaction that gets returned.
1994	 */
1995	error = xfs_trans_commit(trans, 0);
1996	if (error)
1997		return error;
1998
1999	trans = *tpp;
2000
2001	/*
2002	 * transaction commit worked ok so we can drop the extra ticket
2003	 * reference that we gained in xfs_trans_dup()
2004	 */
2005	xfs_log_ticket_put(trans->t_ticket);
2006
2007
2008	/*
2009	 * Reserve space in the log for the next transaction.
2010	 * This also pushes items in the "AIL", the list of logged items,
2011	 * out to disk if they are taking up space at the tail of the log
2012	 * that we want to use.  This requires that either nothing be locked
2013	 * across this call, or that anything that is locked be logged in
2014	 * the prior and the next transactions.
2015	 */
2016	error = xfs_trans_reserve(trans, 0, logres, 0,
2017				  XFS_TRANS_PERM_LOG_RES, count);
2018	/*
2019	 *  Ensure that the inode is in the new transaction and locked.
2020	 */
2021	if (error)
2022		return error;
2023
2024	xfs_trans_ijoin(trans, dp);
2025	return 0;
2026}
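/*
 * Illustrative aside (hypothetical caller, not part of this file): a long
 * running operation rolls the transaction between chunks of work so log
 * space drains as it goes.  "do_one_chunk" and "done" are invented for the
 * sketch; on error the caller still owns (and must cancel) the returned tp.
 */
	while (!done) {
		do_one_chunk(tp, ip);		/* dirties items joined to tp */
		error = xfs_trans_roll(&tp, ip);/* commit chunk, get fresh tp */
		if (error)
			break;
	}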
v4.17
   1/*
   2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
   3 * Copyright (C) 2010 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_mount.h"
  26#include "xfs_inode.h"
  27#include "xfs_extent_busy.h"
  28#include "xfs_quota.h"
  29#include "xfs_trans.h"
  30#include "xfs_trans_priv.h"
  31#include "xfs_log.h"
  32#include "xfs_trace.h"
  33#include "xfs_error.h"
  34
  35kmem_zone_t	*xfs_trans_zone;
  36kmem_zone_t	*xfs_log_item_desc_zone;
  37
  38#if defined(CONFIG_TRACEPOINTS)
  39static void
  40xfs_trans_trace_reservations(
  41	struct xfs_mount	*mp)
  42{
  43	struct xfs_trans_res	resv;
  44	struct xfs_trans_res	*res;
  45	struct xfs_trans_res	*end_res;
  46	int			i;
  47
  48	res = (struct xfs_trans_res *)M_RES(mp);
  49	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
  50	for (i = 0; res < end_res; i++, res++)
  51		trace_xfs_trans_resv_calc(mp, i, res);
  52	xfs_log_get_max_trans_res(mp, &resv);
  53	trace_xfs_trans_resv_calc(mp, -1, &resv);
  54}
  55#else
  56# define xfs_trans_trace_reservations(mp)
  57#endif
  58
  59/*
  60 * Initialize the precomputed transaction reservation values
  61 * in the mount structure.
  62 */
  63void
  64xfs_trans_init(
  65	struct xfs_mount	*mp)
  66{
  67	xfs_trans_resv_calc(mp, M_RES(mp));
  68	xfs_trans_trace_reservations(mp);
  69}
  70
  71/*
  72 * Free the transaction structure.  If there is more clean up
  73 * to do when the structure is freed, add it here.
  74 */
  75STATIC void
  76xfs_trans_free(
  77	struct xfs_trans	*tp)
  78{
  79	xfs_extent_busy_sort(&tp->t_busy);
  80	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
  81
  82	atomic_dec(&tp->t_mountp->m_active_trans);
  83	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
  84		sb_end_intwrite(tp->t_mountp->m_super);
  85	xfs_trans_free_dqinfo(tp);
  86	kmem_zone_free(xfs_trans_zone, tp);
  87}
  88
  89/*
  90 * This is called to create a new transaction which will share the
  91 * permanent log reservation of the given transaction.  The remaining
  92 * unused block and rt extent reservations are also inherited.  This
  93 * implies that the original transaction is no longer allowed to allocate
   94 * blocks.  Locks and log items, however, are not inherited.  They must
  95 * be added to the new transaction explicitly.
  96 */
  97STATIC xfs_trans_t *
  98xfs_trans_dup(
  99	xfs_trans_t	*tp)
 100{
 101	xfs_trans_t	*ntp;
 102
 103	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
 104
 105	/*
 106	 * Initialize the new transaction structure.
 107	 */
 108	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
 109	ntp->t_mountp = tp->t_mountp;
 110	INIT_LIST_HEAD(&ntp->t_items);
 111	INIT_LIST_HEAD(&ntp->t_busy);
 112
 113	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 114	ASSERT(tp->t_ticket != NULL);
 115
 116	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
 117		       (tp->t_flags & XFS_TRANS_RESERVE) |
 118		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
 119	/* We gave our writer reference to the new transaction */
 120	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
 121	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
 122
 123	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
 124	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
 125	tp->t_blk_res = tp->t_blk_res_used;
 126
 127	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
 128	tp->t_rtx_res = tp->t_rtx_res_used;
 129	ntp->t_pflags = tp->t_pflags;
 130
 131	xfs_trans_dup_dqinfo(tp, ntp);
 132
 133	atomic_inc(&tp->t_mountp->m_active_trans);
 134	return ntp;
 135}
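/*
 * Illustrative aside, not kernel code: the block reservation handoff above
 * gives the unused part of the old reservation to the duplicate and leaves
 * the old transaction holding exactly what it has used, so the total is
 * conserved.  In plain arithmetic:
 */
#include <assert.h>

int main(void)
{
	unsigned int	blk_res = 100, blk_res_used = 37;
	unsigned int	ntp_res, tp_res;

	ntp_res = blk_res - blk_res_used;	/* inherited by the dup */
	tp_res = blk_res_used;			/* old trans can allocate no more */
	assert(ntp_res + tp_res == blk_res);	/* nothing lost, nothing gained */
	return 0;
}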
 136
 137/*
 138 * This is called to reserve free disk blocks and log space for the
 139 * given transaction.  This must be done before allocating any resources
 140 * within the transaction.
 141 *
 142 * This will return ENOSPC if there are not enough blocks available.
 143 * It will sleep waiting for available log space.
 144 * The only valid flag in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES, which
 145 * is used by long running transactions.  If any one of the reservations
 146 * fails then they will all be backed out.
 147 *
 148 * This does not do quota reservations. That typically is done by the
 149 * caller afterwards.
 150 */
 151static int
 152xfs_trans_reserve(
 153	struct xfs_trans	*tp,
 154	struct xfs_trans_res	*resp,
 155	uint			blocks,
 156	uint			rtextents)
 157{
 158	int		error = 0;
 159	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 160
 161	/* Mark this thread as being in a transaction */
 162	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 163
 164	/*
 165	 * Attempt to reserve the needed disk blocks by decrementing
 166	 * the number needed from the number available.  This will
 167	 * fail if the count would go below zero.
 168	 */
 169	if (blocks > 0) {
 170		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 171		if (error != 0) {
 172			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 173			return -ENOSPC;
 174		}
 175		tp->t_blk_res += blocks;
 176	}
 177
 178	/*
 179	 * Reserve the log space needed for this transaction.
 180	 */
 181	if (resp->tr_logres > 0) {
 182		bool	permanent = false;
 183
 184		ASSERT(tp->t_log_res == 0 ||
 185		       tp->t_log_res == resp->tr_logres);
 186		ASSERT(tp->t_log_count == 0 ||
 187		       tp->t_log_count == resp->tr_logcount);
 188
 189		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
 190			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
 191			permanent = true;
 192		} else {
 193			ASSERT(tp->t_ticket == NULL);
 194			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
 195		}
 196
 197		if (tp->t_ticket != NULL) {
 198			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
 199			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
 200		} else {
 201			error = xfs_log_reserve(tp->t_mountp,
 202						resp->tr_logres,
 203						resp->tr_logcount,
 204						&tp->t_ticket, XFS_TRANSACTION,
 205						permanent);
 206		}
 207
 208		if (error)
 209			goto undo_blocks;
 210
 211		tp->t_log_res = resp->tr_logres;
 212		tp->t_log_count = resp->tr_logcount;
 213	}
 214
 215	/*
 216	 * Attempt to reserve the needed realtime extents by decrementing
 217	 * the number needed from the number available.  This will
 218	 * fail if the count would go below zero.
 219	 */
 220	if (rtextents > 0) {
 221		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 222		if (error) {
 223			error = -ENOSPC;
 224			goto undo_log;
 225		}
 226		tp->t_rtx_res += rtextents;
 227	}
 228
 229	return 0;
 230
 231	/*
 232	 * Error cases jump to one of these labels to undo any
 233	 * reservations which have already been performed.
 234	 */
 235undo_log:
 236	if (resp->tr_logres > 0) {
 237		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
 238		tp->t_ticket = NULL;
 239		tp->t_log_res = 0;
 240		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
 241	}
 242
 243undo_blocks:
 244	if (blocks > 0) {
 245		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
 246		tp->t_blk_res = 0;
 247	}
 248
 249	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 250
 251	return error;
 252}
 253
 254int
 255xfs_trans_alloc(
 256	struct xfs_mount	*mp,
 257	struct xfs_trans_res	*resp,
 258	uint			blocks,
 259	uint			rtextents,
 260	uint			flags,
 261	struct xfs_trans	**tpp)
 262{
 263	struct xfs_trans	*tp;
 264	int			error;
 265
 266	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
 267		sb_start_intwrite(mp->m_super);
 268
 269	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
 270	atomic_inc(&mp->m_active_trans);
 271
 272	tp = kmem_zone_zalloc(xfs_trans_zone,
 273		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
 274	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
 275	tp->t_flags = flags;
 276	tp->t_mountp = mp;
 277	INIT_LIST_HEAD(&tp->t_items);
 278	INIT_LIST_HEAD(&tp->t_busy);
 279
 280	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
 281	if (error) {
 282		xfs_trans_cancel(tp);
 283		return error;
 284	}
 285
 286	*tpp = tp;
 287	return 0;
 288}
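/*
 * Illustrative aside (hypothetical caller, not part of this file): typical
 * use of xfs_trans_alloc() with a precomputed reservation.  tr_ichange is
 * assumed to be one of the M_RES(mp) reservations of this era; the
 * surrounding logic is invented for the sketch.
 */
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;		/* reservation failed; tp already freed */
	/* ... join items, make modifications, log them ... */
	return xfs_trans_commit(tp);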
 289
 290/*
 291 * Create an empty transaction with no reservation.  This is a defensive
 292 * mechanism for routines that query metadata without actually modifying
 293 * them -- if the metadata being queried is somehow cross-linked (think a
 294 * btree block pointer that points higher in the tree), we risk deadlock.
 295 * However, blocks grabbed as part of a transaction can be re-grabbed.
 296 * The verifiers will notice the corrupt block and the operation will fail
 297 * back to userspace without deadlocking.
 298 *
 299 * Note the zero-length reservation; this transaction MUST be cancelled
 300 * without any dirty data.
 301 */
 302int
 303xfs_trans_alloc_empty(
 304	struct xfs_mount		*mp,
 305	struct xfs_trans		**tpp)
 306{
 307	struct xfs_trans_res		resv = {0};
 308
 309	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
 310}
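/*
 * Illustrative aside (hypothetical caller): an empty transaction wraps a
 * read-only walk and, per the comment above, must be cancelled rather than
 * committed dirty.
 */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;
	/* ... query metadata without modifying anything ... */
	xfs_trans_cancel(tp);		/* nothing is dirty, so this just frees */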
 311
 312/*
 313 * Record the indicated change to the given field for application
 314 * to the file system's superblock when the transaction commits.
 315 * For now, just store the change in the transaction structure.
 316 *
 317 * Mark the transaction structure to indicate that the superblock
 318 * needs to be updated before committing.
 319 *
 320 * Because we may not be keeping track of allocated/free inodes and
 321 * used filesystem blocks in the superblock, we do not mark the
 322 * superblock dirty in this transaction if we modify these fields.
 323 * We still need to update the transaction deltas so that they get
 324 * applied to the incore superblock, but we don't want them to
 325 * cause the superblock to get locked and logged if these are the
 326 * only fields in the superblock that the transaction modifies.
 327 */
 328void
 329xfs_trans_mod_sb(
 330	xfs_trans_t	*tp,
 331	uint		field,
 332	int64_t		delta)
 333{
 334	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
 335	xfs_mount_t	*mp = tp->t_mountp;
 336
 337	switch (field) {
 338	case XFS_TRANS_SB_ICOUNT:
 339		tp->t_icount_delta += delta;
 340		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 341			flags &= ~XFS_TRANS_SB_DIRTY;
 342		break;
 343	case XFS_TRANS_SB_IFREE:
 344		tp->t_ifree_delta += delta;
 345		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 346			flags &= ~XFS_TRANS_SB_DIRTY;
 347		break;
 348	case XFS_TRANS_SB_FDBLOCKS:
 349		/*
 350		 * Track the number of blocks allocated in the transaction.
 351		 * Make sure it does not exceed the number reserved. If so,
 352		 * shutdown as this can lead to accounting inconsistency.
 353		 */
 354		if (delta < 0) {
 355			tp->t_blk_res_used += (uint)-delta;
 356			if (tp->t_blk_res_used > tp->t_blk_res)
 357				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 358		}
 359		tp->t_fdblocks_delta += delta;
 360		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 361			flags &= ~XFS_TRANS_SB_DIRTY;
 362		break;
 363	case XFS_TRANS_SB_RES_FDBLOCKS:
 364		/*
 365		 * The allocation has already been applied to the
 366		 * in-core superblock's counter.  This should only
 367		 * be applied to the on-disk superblock.
 368		 */
 369		tp->t_res_fdblocks_delta += delta;
 370		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
 371			flags &= ~XFS_TRANS_SB_DIRTY;
 372		break;
 373	case XFS_TRANS_SB_FREXTENTS:
 374		/*
 375		 * Track the number of blocks allocated in the
 376		 * transaction.  Make sure it does not exceed the
 377		 * number reserved.
 378		 */
 379		if (delta < 0) {
 380			tp->t_rtx_res_used += (uint)-delta;
 381			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
 382		}
 383		tp->t_frextents_delta += delta;
 384		break;
 385	case XFS_TRANS_SB_RES_FREXTENTS:
 386		/*
 387		 * The allocation has already been applied to the
 388		 * in-core superblock's counter.  This should only
 389		 * be applied to the on-disk superblock.
 390		 */
 391		ASSERT(delta < 0);
 392		tp->t_res_frextents_delta += delta;
 393		break;
 394	case XFS_TRANS_SB_DBLOCKS:
 395		ASSERT(delta > 0);
 396		tp->t_dblocks_delta += delta;
 397		break;
 398	case XFS_TRANS_SB_AGCOUNT:
 399		ASSERT(delta > 0);
 400		tp->t_agcount_delta += delta;
 401		break;
 402	case XFS_TRANS_SB_IMAXPCT:
 403		tp->t_imaxpct_delta += delta;
 404		break;
 405	case XFS_TRANS_SB_REXTSIZE:
 406		tp->t_rextsize_delta += delta;
 407		break;
 408	case XFS_TRANS_SB_RBMBLOCKS:
 409		tp->t_rbmblocks_delta += delta;
 410		break;
 411	case XFS_TRANS_SB_RBLOCKS:
 412		tp->t_rblocks_delta += delta;
 413		break;
 414	case XFS_TRANS_SB_REXTENTS:
 415		tp->t_rextents_delta += delta;
 416		break;
 417	case XFS_TRANS_SB_REXTSLOG:
 418		tp->t_rextslog_delta += delta;
 419		break;
 420	default:
 421		ASSERT(0);
 422		return;
 423	}
 424
 425	tp->t_flags |= flags;
 426}
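/*
 * Illustrative aside (hypothetical caller): an allocation of "len" blocks
 * charged to the transaction shows up as a negative free-block delta, which
 * the XFS_TRANS_SB_FDBLOCKS case above checks against the reservation.
 * "len" is invented for the sketch.
 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);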
 427
 428/*
 429 * xfs_trans_apply_sb_deltas() is called from the commit code
 430 * to bring the superblock buffer into the current transaction
 431 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 432 *
 433 * For now we just look at each field allowed to change and change
 434 * it if necessary.
 435 */
 436STATIC void
 437xfs_trans_apply_sb_deltas(
 438	xfs_trans_t	*tp)
 439{
 440	xfs_dsb_t	*sbp;
 441	xfs_buf_t	*bp;
 442	int		whole = 0;
 443
 444	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
 445	sbp = XFS_BUF_TO_SBP(bp);
 446
 447	/*
 448	 * Check that superblock mods match the mods made to AGF counters.
 449	 */
 450	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
 451	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
 452		tp->t_ag_btree_delta));
 453
 454	/*
 455	 * Only update the superblock counters if we are logging them
 456	 */
 457	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
 458		if (tp->t_icount_delta)
 459			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
 460		if (tp->t_ifree_delta)
 461			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
 462		if (tp->t_fdblocks_delta)
 463			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
 464		if (tp->t_res_fdblocks_delta)
 465			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
 466	}
 467
 468	if (tp->t_frextents_delta)
 469		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
 470	if (tp->t_res_frextents_delta)
 471		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
 472
 473	if (tp->t_dblocks_delta) {
 474		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
 475		whole = 1;
 476	}
 477	if (tp->t_agcount_delta) {
 478		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
 479		whole = 1;
 480	}
 481	if (tp->t_imaxpct_delta) {
 482		sbp->sb_imax_pct += tp->t_imaxpct_delta;
 483		whole = 1;
 484	}
 485	if (tp->t_rextsize_delta) {
 486		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
 487		whole = 1;
 488	}
 489	if (tp->t_rbmblocks_delta) {
 490		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
 491		whole = 1;
 492	}
 493	if (tp->t_rblocks_delta) {
 494		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
 495		whole = 1;
 496	}
 497	if (tp->t_rextents_delta) {
 498		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
 499		whole = 1;
 500	}
 501	if (tp->t_rextslog_delta) {
 502		sbp->sb_rextslog += tp->t_rextslog_delta;
 503		whole = 1;
 504	}
 505
 506	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 507	if (whole)
 508		/*
 509		 * Log the whole thing, the fields are noncontiguous.
 510		 */
 511		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
 512	else
 513		/*
 514		 * Since all the modifiable fields are contiguous, we
 515		 * can get away with this.
 516		 */
 517		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
 518				  offsetof(xfs_dsb_t, sb_frextents) +
 519				  sizeof(sbp->sb_frextents) - 1);
 520}
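/*
 * Illustrative aside, not kernel code: the "else" branch above logs only
 * the byte range spanning the contiguous counter fields, computed with
 * offsetof().  A userspace sketch of that range computation over an
 * invented struct:
 */
#include <stddef.h>
#include <stdio.h>

struct disk_sb {
	long	icount;
	long	ifree;
	long	fdblocks;
	long	frextents;	/* last of the contiguous counters */
	int	imax_pct;	/* deliberately outside the partial range */
};

int main(void)
{
	size_t	first = offsetof(struct disk_sb, icount);
	size_t	last = offsetof(struct disk_sb, frextents) +
		       sizeof(((struct disk_sb *)0)->frextents) - 1;

	/* The inclusive byte range [first, last] covers all four counters. */
	printf("log bytes %zu..%zu\n", first, last);
	return 0;
}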
 521
 522STATIC int
 523xfs_sb_mod8(
 524	uint8_t			*field,
 525	int8_t			delta)
 526{
 527	int8_t			counter = *field;
 528
 529	counter += delta;
 530	if (counter < 0) {
 531		ASSERT(0);
 532		return -EINVAL;
 533	}
 534	*field = counter;
 535	return 0;
 536}
 537
 538STATIC int
 539xfs_sb_mod32(
 540	uint32_t		*field,
 541	int32_t			delta)
 542{
 543	int32_t			counter = *field;
 544
 545	counter += delta;
 546	if (counter < 0) {
 547		ASSERT(0);
 548		return -EINVAL;
 549	}
 550	*field = counter;
 551	return 0;
 552}
 553
 554STATIC int
 555xfs_sb_mod64(
 556	uint64_t		*field,
 557	int64_t			delta)
 558{
 559	int64_t			counter = *field;
 560
 561	counter += delta;
 562	if (counter < 0) {
 563		ASSERT(0);
 564		return -EINVAL;
 565	}
 566	*field = counter;
 567	return 0;
 568}
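/*
 * Illustrative aside, not kernel code: the three helpers above differ only
 * in integer width.  The shared shape -- read, add, reject if the signed
 * result went negative, write back -- in a userspace sketch:
 */
#include <stdint.h>
#include <stdio.h>

static int mod_counter64(uint64_t *field, int64_t delta)
{
	int64_t	counter = (int64_t)*field + delta;

	if (counter < 0)
		return -1;	/* would underflow: reject, leave field alone */
	*field = (uint64_t)counter;
	return 0;
}

int main(void)
{
	uint64_t	blocks = 10;

	printf("%d\n", mod_counter64(&blocks, -4));	/* 0, blocks is now 6 */
	printf("%d\n", mod_counter64(&blocks, -7));	/* -1, blocks stays 6 */
	return 0;
}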
 569
 570/*
 571 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 572 * and apply superblock counter changes to the in-core superblock.  The
 573 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 574 * applied to the in-core superblock.  The idea is that those deltas have
 575 * already been applied.
 576 *
 577 * If we are not logging superblock counters, then the inode allocated/free and
 578 * used block counts are not updated in the on disk superblock. In this case,
 579 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 580 * still need to update the incore superblock with the changes.
 581 */
 582void
 583xfs_trans_unreserve_and_mod_sb(
 584	struct xfs_trans	*tp)
 585{
 586	struct xfs_mount	*mp = tp->t_mountp;
 587	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
 588	int64_t			blkdelta = 0;
 589	int64_t			rtxdelta = 0;
 590	int64_t			idelta = 0;
 591	int64_t			ifreedelta = 0;
 592	int			error;
 593
 594	/* calculate deltas */
 595	if (tp->t_blk_res > 0)
 596		blkdelta = tp->t_blk_res;
 597	if ((tp->t_fdblocks_delta != 0) &&
 598	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 599	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
 600	        blkdelta += tp->t_fdblocks_delta;
 601
 602	if (tp->t_rtx_res > 0)
 603		rtxdelta = tp->t_rtx_res;
 604	if ((tp->t_frextents_delta != 0) &&
 605	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
 606		rtxdelta += tp->t_frextents_delta;
 607
 608	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 609	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
 610		idelta = tp->t_icount_delta;
 611		ifreedelta = tp->t_ifree_delta;
 612	}
 613
 614	/* apply the per-cpu counters */
 615	if (blkdelta) {
 616		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 617		if (error)
 618			goto out;
 619	}
 620
 621	if (idelta) {
 622		error = xfs_mod_icount(mp, idelta);
 623		if (error)
 624			goto out_undo_fdblocks;
 625	}
 626
 627	if (ifreedelta) {
 628		error = xfs_mod_ifree(mp, ifreedelta);
 629		if (error)
 630			goto out_undo_icount;
 631	}
 632
 633	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
 634		return;
 635
 636	/* apply remaining deltas */
 637	spin_lock(&mp->m_sb_lock);
 638	if (rtxdelta) {
 639		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
 640		if (error)
 641			goto out_undo_ifree;
 642	}
 643
 644	if (tp->t_dblocks_delta != 0) {
 645		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
 646		if (error)
 647			goto out_undo_frextents;
 648	}
 649	if (tp->t_agcount_delta != 0) {
 650		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 651		if (error)
 652			goto out_undo_dblocks;
 653	}
 654	if (tp->t_imaxpct_delta != 0) {
 655		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
 656		if (error)
 657			goto out_undo_agcount;
 658	}
 659	if (tp->t_rextsize_delta != 0) {
 660		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
 661				     tp->t_rextsize_delta);
 662		if (error)
 663			goto out_undo_imaxpct;
 664	}
 665	if (tp->t_rbmblocks_delta != 0) {
 666		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
 667				     tp->t_rbmblocks_delta);
 668		if (error)
 669			goto out_undo_rextsize;
 670	}
 671	if (tp->t_rblocks_delta != 0) {
 672		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
 673		if (error)
 674			goto out_undo_rbmblocks;
 675	}
 676	if (tp->t_rextents_delta != 0) {
 677		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
 678				     tp->t_rextents_delta);
 679		if (error)
 680			goto out_undo_rblocks;
 681	}
 682	if (tp->t_rextslog_delta != 0) {
 683		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
 684				     tp->t_rextslog_delta);
 685		if (error)
 686			goto out_undo_rextents;
 687	}
 688	spin_unlock(&mp->m_sb_lock);
 689	return;
 690
 691out_undo_rextents:
 692	if (tp->t_rextents_delta)
 693		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
 694out_undo_rblocks:
 695	if (tp->t_rblocks_delta)
 696		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
 697out_undo_rbmblocks:
 698	if (tp->t_rbmblocks_delta)
 699		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
 700out_undo_rextsize:
 701	if (tp->t_rextsize_delta)
 702		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
 703out_undo_imaxpct:
 704	if (tp->t_imaxpct_delta)
 705		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
 706out_undo_agcount:
 707	if (tp->t_agcount_delta)
 708		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
 709out_undo_dblocks:
 710	if (tp->t_dblocks_delta)
 711		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
 712out_undo_frextents:
 713	if (rtxdelta)
 714		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
 715out_undo_ifree:
 716	spin_unlock(&mp->m_sb_lock);
 717	if (ifreedelta)
 718		xfs_mod_ifree(mp, -ifreedelta);
 719out_undo_icount:
 720	if (idelta)
 721		xfs_mod_icount(mp, -idelta);
 722out_undo_fdblocks:
 723	if (blkdelta)
 724		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 725out:
 726	ASSERT(error == 0);
 727	return;
 728}
 729
 730/*
 731 * Add the given log item to the transaction's list of log items.
 732 *
 733 * The log item will now point to its new descriptor with its li_desc field.
 734 */
 735void
 736xfs_trans_add_item(
 737	struct xfs_trans	*tp,
 738	struct xfs_log_item	*lip)
 739{
 740	struct xfs_log_item_desc *lidp;
 741
 742	ASSERT(lip->li_mountp == tp->t_mountp);
 743	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
 744
 745	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
 746
 747	lidp->lid_item = lip;
 748	lidp->lid_flags = 0;
 749	list_add_tail(&lidp->lid_trans, &tp->t_items);
 750
 751	lip->li_desc = lidp;
 752}
 753
 754STATIC void
 755xfs_trans_free_item_desc(
 756	struct xfs_log_item_desc *lidp)
 757{
 758	list_del_init(&lidp->lid_trans);
 759	kmem_zone_free(xfs_log_item_desc_zone, lidp);
 760}
 761
 762/*
 763 * Unlink and free the given descriptor.
 764 */
 765void
 766xfs_trans_del_item(
 767	struct xfs_log_item	*lip)
 768{
 769	xfs_trans_free_item_desc(lip->li_desc);
 770	lip->li_desc = NULL;
 771}
 772
 773/*
 774 * Unlock all of the items of a transaction and free all the descriptors
 775 * of that transaction.
 776 */
 777void
 778xfs_trans_free_items(
 779	struct xfs_trans	*tp,
 780	xfs_lsn_t		commit_lsn,
 781	bool			abort)
 782{
 783	struct xfs_log_item_desc *lidp, *next;
 784
 785	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
 786		struct xfs_log_item	*lip = lidp->lid_item;
 787
 788		lip->li_desc = NULL;
 789
 790		if (commit_lsn != NULLCOMMITLSN)
 791			lip->li_ops->iop_committing(lip, commit_lsn);
 792		if (abort)
 793			lip->li_flags |= XFS_LI_ABORTED;
 794		lip->li_ops->iop_unlock(lip);
 795
 796		xfs_trans_free_item_desc(lidp);
 797	}
 798}
 799
 800static inline void
 801xfs_log_item_batch_insert(
 802	struct xfs_ail		*ailp,
 803	struct xfs_ail_cursor	*cur,
 804	struct xfs_log_item	**log_items,
 805	int			nr_items,
 806	xfs_lsn_t		commit_lsn)
 807{
 808	int	i;
 809
 810	spin_lock(&ailp->ail_lock);
 811	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
 812	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 813
 814	for (i = 0; i < nr_items; i++) {
 815		struct xfs_log_item *lip = log_items[i];
 816
 817		lip->li_ops->iop_unpin(lip, 0);
 818	}
 819}
 820
 821/*
 822 * Bulk operation version of xfs_trans_committed that takes a log vector of
 823 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 824 * minimise lock traffic.
 825 *
 826 * If we are called with the aborted flag set, it is because a log write during
 827 * a CIL checkpoint commit has failed. In this case, all the items in the
 828 * checkpoint have already gone through iop_commited and iop_unlock, which
 829 * means that checkpoint commit abort handling is treated exactly the same
 830 * as an iclog write error even though we haven't started any IO yet. Hence in
 831 * this case all we need to do is iop_committed processing, followed by an
 832 * iop_unpin(aborted) call.
 833 *
 834 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 835 * at the end of the AIL, the insert cursor avoids the need to walk
 836 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 837 * call. This saves a lot of needless list walking and is a net win, even
 838 * though it slightly increases the amount of AIL lock traffic to set it up
 839 * and tear it down.
 840 */
 841void
 842xfs_trans_committed_bulk(
 843	struct xfs_ail		*ailp,
 844	struct xfs_log_vec	*log_vector,
 845	xfs_lsn_t		commit_lsn,
 846	int			aborted)
 847{
 848#define LOG_ITEM_BATCH_SIZE	32
 849	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 850	struct xfs_log_vec	*lv;
 851	struct xfs_ail_cursor	cur;
 852	int			i = 0;
 853
 854	spin_lock(&ailp->ail_lock);
 855	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
 856	spin_unlock(&ailp->ail_lock);
 857
 858	/* unpin all the log items */
 859	for (lv = log_vector; lv; lv = lv->lv_next) {
 860		struct xfs_log_item	*lip = lv->lv_item;
 861		xfs_lsn_t		item_lsn;
 862
 863		if (aborted)
 864			lip->li_flags |= XFS_LI_ABORTED;
 865		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
 866
 867		/* item_lsn of -1 means the item needs no further processing */
 868		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 869			continue;
 870
 871		/*
 872		 * if we are aborting the operation, no point in inserting the
 873		 * object into the AIL as we are in a shutdown situation.
 874		 */
 875		if (aborted) {
 876			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
 877			lip->li_ops->iop_unpin(lip, 1);
 878			continue;
 879		}
 880
 881		if (item_lsn != commit_lsn) {
 882
 883			/*
 884			 * Not a bulk update candidate due to unusual item_lsn.
 885			 * Push into AIL immediately, rechecking the lsn once
 886			 * we have the ail lock. Then unpin the item. This does
 887			 * not affect the AIL cursor the bulk insert path is
 888			 * using.
 889			 */
 890			spin_lock(&ailp->ail_lock);
 891			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
 892				xfs_trans_ail_update(ailp, lip, item_lsn);
 893			else
 894				spin_unlock(&ailp->ail_lock);
 895			lip->li_ops->iop_unpin(lip, 0);
 896			continue;
 897		}
 898
 899		/* Item is a candidate for bulk AIL insert.  */
 900		log_items[i++] = lv->lv_item;
 901		if (i >= LOG_ITEM_BATCH_SIZE) {
 902			xfs_log_item_batch_insert(ailp, &cur, log_items,
 903					LOG_ITEM_BATCH_SIZE, commit_lsn);
 904			i = 0;
 905		}
 906	}
 907
 908	/* make sure we insert the remainder! */
 909	if (i)
 910		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
 911
 912	spin_lock(&ailp->ail_lock);
 913	xfs_trans_ail_cursor_done(&cur);
 914	spin_unlock(&ailp->ail_lock);
 915}
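/*
 * Illustrative aside, not kernel code: the batching above is the common
 * "fill a fixed array, flush when full, flush the remainder" pattern, which
 * amortises the per-flush cost (here, taking the AIL lock) over many items.
 * A userspace sketch:
 */
#include <stdio.h>

#define BATCH	32

static void flush(int *batch, int n)
{
	(void)batch;			/* stands in for the locked bulk insert */
	printf("flushing %d items\n", n);
}

int main(void)
{
	int	batch[BATCH];
	int	i = 0, item;

	for (item = 0; item < 100; item++) {
		batch[i++] = item;
		if (i >= BATCH) {
			flush(batch, BATCH);
			i = 0;
		}
	}
	if (i)				/* make sure we flush the remainder! */
		flush(batch, i);
	return 0;
}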
 916
 917/*
 918 * Commit the given transaction to the log.
 919 *
 920 * The XFS disk error handling mechanism is not based on a typical
 921 * transaction abort mechanism. Logically, after the filesystem
 922 * gets marked 'SHUTDOWN', we can't let any new transactions
 923 * be durable - i.e. committed to disk - because some metadata might
 924 * be inconsistent. In such cases, this returns an error, and the
 925 * caller may assume that all locked objects joined to the transaction
 926 * have already been unlocked as if the commit had succeeded.
 927 * Do not reference the transaction structure after this call.
 928 */
 929static int
 930__xfs_trans_commit(
 931	struct xfs_trans	*tp,
 932	bool			regrant)
 933{
 934	struct xfs_mount	*mp = tp->t_mountp;
 935	xfs_lsn_t		commit_lsn = -1;
 936	int			error = 0;
 937	int			sync = tp->t_flags & XFS_TRANS_SYNC;
 938
 939	/*
 940	 * If there is nothing to be logged by the transaction,
 941	 * then unlock all of the items associated with the
 942	 * transaction and free the transaction structure.
 943	 * Also make sure to return any reserved blocks to
 944	 * the free pool.
 945	 */
 946	if (!(tp->t_flags & XFS_TRANS_DIRTY))
 947		goto out_unreserve;
 948
 949	if (XFS_FORCED_SHUTDOWN(mp)) {
 950		error = -EIO;
 951		goto out_unreserve;
 952	}
 953
 954	ASSERT(tp->t_ticket != NULL);
 955
 956	/*
 957	 * If we need to update the superblock, then do it now.
 958	 */
 959	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
 960		xfs_trans_apply_sb_deltas(tp);
 961	xfs_trans_apply_dquot_deltas(tp);
 962
 963	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
 964
 965	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 966	xfs_trans_free(tp);
 967
 968	/*
 969	 * If the transaction needs to be synchronous, then force the
 970	 * log out now and wait for it.
 971	 */
 972	if (sync) {
 973		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
 974		XFS_STATS_INC(mp, xs_trans_sync);
 975	} else {
 976		XFS_STATS_INC(mp, xs_trans_async);
 977	}
 978
 979	return error;
 980
 981out_unreserve:
 982	xfs_trans_unreserve_and_mod_sb(tp);
 983
 984	/*
 985	 * It is indeed possible for the transaction to be not dirty but
 986	 * the dqinfo portion to be.  All that means is that we have some
 987	 * (non-persistent) quota reservations that need to be unreserved.
 988	 */
 989	xfs_trans_unreserve_and_mod_dquots(tp);
 990	if (tp->t_ticket) {
 991		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
 992		if (commit_lsn == -1 && !error)
 993			error = -EIO;
 994	}
 995	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 996	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
 997	xfs_trans_free(tp);
 998
 999	XFS_STATS_INC(mp, xs_trans_empty);
1000	return error;
1001}
1002
1003int
1004xfs_trans_commit(
1005	struct xfs_trans	*tp)
1006{
1007	return __xfs_trans_commit(tp, false);
1008}
1009
1010/*
1011 * Unlock all of the transaction's items and free the transaction.
1012 * The transaction must not have modified any of its items, because
1013 * there is no way to restore them to their previous state.
1014 *
1015 * If the transaction has made a log reservation, make sure to release
1016 * it as well.
1017 */
1018void
1019xfs_trans_cancel(
1020	struct xfs_trans	*tp)
1021{
1022	struct xfs_mount	*mp = tp->t_mountp;
1023	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1024
1025	/*
1026	 * See if the caller is relying on us to shut down the
1027	 * filesystem.  This happens in paths where we detect
1028	 * corruption and decide to give up.
1029	 */
1030	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1031		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1032		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1033	}
1034#ifdef DEBUG
1035	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1036		struct xfs_log_item_desc *lidp;
1037
1038		list_for_each_entry(lidp, &tp->t_items, lid_trans)
1039			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
1040	}
1041#endif
1042	xfs_trans_unreserve_and_mod_sb(tp);
1043	xfs_trans_unreserve_and_mod_dquots(tp);
1044
1045	if (tp->t_ticket)
1046		xfs_log_done(mp, tp->t_ticket, NULL, false);
1047
1048	/* mark this thread as no longer being in a transaction */
1049	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1050
1051	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
1052	xfs_trans_free(tp);
1053}
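/*
 * Illustrative aside (hypothetical caller): the usual pairing in this era
 * is commit on success and cancel on every error path before the commit;
 * "do_some_update" is invented for the sketch.
 */
	error = do_some_update(tp);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}
	return xfs_trans_commit(tp);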
1054
1055/*
1056 * Roll from one trans in the sequence of PERMANENT transactions to
1057 * the next: permanent transactions are only flushed out when
1058 * committed with xfs_trans_commit(), but we still want to let
1059 * chunks of it go to the log as soon as possible. So we commit the
1060 * chunk we've been working on and get a new transaction to continue.
1061 */
1062int
1063xfs_trans_roll(
1064	struct xfs_trans	**tpp)
1065{
1066	struct xfs_trans	*trans = *tpp;
1067	struct xfs_trans_res	tres;
1068	int			error;
1069
1070	/*
1071	 * Copy the critical parameters from one trans to the next.
1072	 */
1073	tres.tr_logres = trans->t_log_res;
1074	tres.tr_logcount = trans->t_log_count;
1075
1076	*tpp = xfs_trans_dup(trans);
1077
1078	/*
1079	 * Commit the current transaction.
1080	 * If this commit failed, then it'd just unlock those items that
1081	 * are not marked ihold. That also means that a filesystem shutdown
1082	 * is in progress. The caller takes the responsibility to cancel
1083	 * the duplicate transaction that gets returned.
1084	 */
1085	error = __xfs_trans_commit(trans, true);
1086	if (error)
1087		return error;
1088
1089	/*
1090	 * Reserve space in the log for the next transaction.
1091	 * This also pushes items in the "AIL", the list of logged items,
1092	 * out to disk if they are taking up space at the tail of the log
1093	 * that we want to use.  This requires that either nothing be locked
1094	 * across this call, or that anything that is locked be logged in
1095	 * the prior and the next transactions.
1096	 */
1097	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1098	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1099}
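/*
 * Illustrative aside (hypothetical caller): xfs_trans_roll() in this era no
 * longer takes an inode, so a caller keeping an inode locked across rolls
 * re-joins it by hand.  "ip", "done" and "do_one_chunk" are invented, and
 * the three-argument xfs_trans_ijoin() of this era is assumed.
 */
	while (!done) {
		do_one_chunk(tp, ip);		/* dirties items joined to tp */
		error = xfs_trans_roll(&tp);
		if (error)
			break;			/* tp still valid; caller cancels */
		xfs_trans_ijoin(tp, ip, 0);	/* re-join for the next chunk */
	}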