fs/xfs/xfs_mount.c (Linux v3.15)
 
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_shared.h"
  21#include "xfs_format.h"
  22#include "xfs_log_format.h"
  23#include "xfs_trans_resv.h"
  24#include "xfs_bit.h"
  25#include "xfs_inum.h"
  26#include "xfs_sb.h"
  27#include "xfs_ag.h"
  28#include "xfs_mount.h"
  29#include "xfs_da_format.h"
  30#include "xfs_inode.h"
  31#include "xfs_dir2.h"
  32#include "xfs_ialloc.h"
  33#include "xfs_alloc.h"
  34#include "xfs_rtalloc.h"
  35#include "xfs_bmap.h"
  36#include "xfs_trans.h"
  37#include "xfs_trans_priv.h"
  38#include "xfs_log.h"
  39#include "xfs_error.h"
  40#include "xfs_quota.h"
  41#include "xfs_fsops.h"
  42#include "xfs_trace.h"
  43#include "xfs_icache.h"
  44#include "xfs_dinode.h"
  45
  46
  47#ifdef HAVE_PERCPU_SB
  48STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
  49						int);
  50STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
  51						int);
  52STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
  53#else
  54
  55#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
  56#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
  57#endif
  58
  59static DEFINE_MUTEX(xfs_uuid_table_mutex);
  60static int xfs_uuid_table_size;
  61static uuid_t *xfs_uuid_table;
  62
  63/*
  64 * See if the UUID is unique among mounted XFS filesystems.
  65 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
  66 */
  67STATIC int
  68xfs_uuid_mount(
  69	struct xfs_mount	*mp)
  70{
  71	uuid_t			*uuid = &mp->m_sb.sb_uuid;
  72	int			hole, i;
  73
  74	if (mp->m_flags & XFS_MOUNT_NOUUID)
  75		return 0;
  76
  77	if (uuid_is_nil(uuid)) {
  78		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
  79		return XFS_ERROR(EINVAL);
  80	}
  81
  82	mutex_lock(&xfs_uuid_table_mutex);
  83	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
  84		if (uuid_is_nil(&xfs_uuid_table[i])) {
  85			hole = i;
  86			continue;
  87		}
  88		if (uuid_equal(uuid, &xfs_uuid_table[i]))
  89			goto out_duplicate;
  90	}
  91
  92	if (hole < 0) {
  93		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
  94			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
  95			xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
  96			KM_SLEEP);
  97		hole = xfs_uuid_table_size++;
  98	}
  99	xfs_uuid_table[hole] = *uuid;
 100	mutex_unlock(&xfs_uuid_table_mutex);
 101
 102	return 0;
 103
 104 out_duplicate:
 105	mutex_unlock(&xfs_uuid_table_mutex);
 106	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
 107	return XFS_ERROR(EINVAL);
 108}
 109
 110STATIC void
 111xfs_uuid_unmount(
 112	struct xfs_mount	*mp)
 113{
 114	uuid_t			*uuid = &mp->m_sb.sb_uuid;
 115	int			i;
 116
 117	if (mp->m_flags & XFS_MOUNT_NOUUID)
 118		return;
 119
 120	mutex_lock(&xfs_uuid_table_mutex);
 121	for (i = 0; i < xfs_uuid_table_size; i++) {
 122		if (uuid_is_nil(&xfs_uuid_table[i]))
 123			continue;
 124		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
 125			continue;
 126		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
 127		break;
 128	}
 129	ASSERT(i < xfs_uuid_table_size);
 130	mutex_unlock(&xfs_uuid_table_mutex);
 131}
 132
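/*
 * Illustrative sketch (editor's addition, not part of the kernel
 * source): the mount-time UUID table above is a simple linear-scan
 * set. Lookups walk every slot, unmount leaves a "nil" hole behind,
 * mount reuses the first hole it saw, and the table grows by one slot
 * only when no hole exists. A minimal user-space analogue, with an
 * invented 16-byte uuid type and no locking:
 */

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned char b[16]; } uuid_t;

static uuid_t *table;
static int table_size;

static int uuid_is_nil(const uuid_t *u)
{
	static const uuid_t nil;

	return !memcmp(u, &nil, sizeof(nil));
}

/* Return 0 on success, -1 for a duplicate or allocation failure. */
static int uuid_table_add(const uuid_t *uuid)
{
	uuid_t	*bigger;
	int	i, hole = -1;

	for (i = 0; i < table_size; i++) {
		if (uuid_is_nil(&table[i])) {
			hole = i;		/* remember a reusable slot */
			continue;
		}
		if (!memcmp(uuid, &table[i], sizeof(*uuid)))
			return -1;		/* already present */
	}
	if (hole < 0) {				/* no hole: grow by one */
		bigger = realloc(table, (table_size + 1) * sizeof(*table));
		if (!bigger)
			return -1;
		table = bigger;
		hole = table_size++;
	}
	table[hole] = *uuid;
	return 0;
}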
 133
 134STATIC void
 135__xfs_free_perag(
 136	struct rcu_head	*head)
 137{
 138	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
 139
 140	ASSERT(atomic_read(&pag->pag_ref) == 0);
 141	kmem_free(pag);
 142}
 143
 144/*
 145 * Free up the per-ag resources associated with the mount structure.
 146 */
 147STATIC void
 148xfs_free_perag(
 149	xfs_mount_t	*mp)
 150{
 151	xfs_agnumber_t	agno;
 152	struct xfs_perag *pag;
 153
 154	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 155		spin_lock(&mp->m_perag_lock);
 156		pag = radix_tree_delete(&mp->m_perag_tree, agno);
 157		spin_unlock(&mp->m_perag_lock);
 158		ASSERT(pag);
 159		ASSERT(atomic_read(&pag->pag_ref) == 0);
 160		call_rcu(&pag->rcu_head, __xfs_free_perag);
 161	}
 162}
 163
 164/*
 165 * Check size of device based on the (data/realtime) block count.
 166 * Note: this check is used by the growfs code as well as mount.
 167 */
 168int
 169xfs_sb_validate_fsb_count(
 170	xfs_sb_t	*sbp,
 171	__uint64_t	nblocks)
 172{
 173	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
 174	ASSERT(sbp->sb_blocklog >= BBSHIFT);
 175
 176#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
 177	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
 178		return EFBIG;
 179#else                  /* Limited by UINT_MAX of sectors */
 180	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
 181		return EFBIG;
 182#endif
 183	return 0;
 184}
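/*
 * Editor's note (not from the kernel tree): the two branches above
 * bound the device by what the kernel can index. On the 64-bit-blkno
 * branch the last block's page cache index must fit in an unsigned
 * long; with 512-byte blocks (sb_blocklog == 9) and 4 KiB pages,
 * eight blocks share a page, so nblocks >> 3 is what must fit. On the
 * other branch the limit is the UINT_MAX count of 512-byte sectors.
 * A standalone sketch of the first check, assuming 4 KiB pages and
 * blocklog <= page shift; the test only bites where long is 32 bits:
 */

#include <limits.h>

static int fsb_count_fits(unsigned long long nblocks, int blocklog)
{
	const int page_shift = 12;		/* assume 4 KiB pages */

	/* the page index of the last block must fit in unsigned long */
	return (nblocks >> (page_shift - blocklog)) <= ULONG_MAX;
}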
 185
 186int
 187xfs_initialize_perag(
 188	xfs_mount_t	*mp,
 189	xfs_agnumber_t	agcount,
 190	xfs_agnumber_t	*maxagi)
 191{
 192	xfs_agnumber_t	index;
 193	xfs_agnumber_t	first_initialised = 0;
 194	xfs_perag_t	*pag;
 195	xfs_agino_t	agino;
 196	xfs_ino_t	ino;
 197	xfs_sb_t	*sbp = &mp->m_sb;
 198	int		error = -ENOMEM;
 199
 200	/*
 201	 * Walk the current per-ag tree so we don't try to initialise AGs
 202	 * that already exist (growfs case). Allocate and insert all the
 203	 * AGs we don't find ready for initialisation.
 204	 */
 205	for (index = 0; index < agcount; index++) {
 206		pag = xfs_perag_get(mp, index);
 207		if (pag) {
 208			xfs_perag_put(pag);
 209			continue;
 210		}
 211		if (!first_initialised)
 212			first_initialised = index;
 213
 214		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
 215		if (!pag)
 216			goto out_unwind;
 217		pag->pag_agno = index;
 218		pag->pag_mount = mp;
 219		spin_lock_init(&pag->pag_ici_lock);
 220		mutex_init(&pag->pag_ici_reclaim_lock);
 221		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 222		spin_lock_init(&pag->pag_buf_lock);
 223		pag->pag_buf_tree = RB_ROOT;
 224
 225		if (radix_tree_preload(GFP_NOFS))
 226			goto out_unwind;
 227
 228		spin_lock(&mp->m_perag_lock);
 229		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
 230			BUG();
 231			spin_unlock(&mp->m_perag_lock);
 232			radix_tree_preload_end();
 233			error = -EEXIST;
 234			goto out_unwind;
 235		}
 236		spin_unlock(&mp->m_perag_lock);
 237		radix_tree_preload_end();
 238	}
 239
 240	/*
 241	 * If we mount with the inode64 option, or if no inode overflows
 242	 * the legacy 32-bit address space, clear the inode32 option.
 243	 */
 244	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
 245	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 246
 247	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
 248		mp->m_flags |= XFS_MOUNT_32BITINODES;
 249	else
 250		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 251
 252	if (mp->m_flags & XFS_MOUNT_32BITINODES)
 253		index = xfs_set_inode32(mp);
 254	else
 255		index = xfs_set_inode64(mp);
 256
 257	if (maxagi)
 258		*maxagi = index;
 259	return 0;
 260
 261out_unwind:
 262	kmem_free(pag);
 263	for (; index > first_initialised; index--) {
 264		pag = radix_tree_delete(&mp->m_perag_tree, index);
 265		kmem_free(pag);
 266	}
 267	return error;
 268}
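/*
 * Editor's note: radix_tree_preload() above allocates tree nodes up
 * front (with GFP_NOFS, so memory reclaim cannot recurse into the
 * filesystem) precisely so that the radix_tree_insert() performed
 * under the m_perag_lock spinlock never needs to allocate and thus
 * never sleeps; radix_tree_preload_end() re-enables preemption once
 * the insert is done.
 */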
 269
 270/*
 271 * xfs_readsb
 272 *
 273 * Does the initial read of the superblock.
 274 */
 275int
 276xfs_readsb(
 277	struct xfs_mount *mp,
 278	int		flags)
 279{
 280	unsigned int	sector_size;
 281	struct xfs_buf	*bp;
 282	struct xfs_sb	*sbp = &mp->m_sb;
 283	int		error;
 284	int		loud = !(flags & XFS_MFSI_QUIET);
 285	const struct xfs_buf_ops *buf_ops;
 286
 287	ASSERT(mp->m_sb_bp == NULL);
 288	ASSERT(mp->m_ddev_targp != NULL);
 289
 290	/*
 291	 * For the initial read, we must guess at the sector
 292	 * size based on the block device.  It's enough to
 293	 * get the sb_sectsize out of the superblock and
 294	 * then reread with the proper length.
 295	 * We don't verify it yet, because it may not be complete.
 296	 */
 297	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
 298	buf_ops = NULL;
 299
 300	/*
 301	 * Allocate a (locked) buffer to hold the superblock.
 302	 * This will be kept around at all times to optimize
 303	 * access to the superblock.
 304	 */
 305reread:
 306	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
 307				   BTOBB(sector_size), 0, buf_ops);
 308	if (!bp) {
 309		if (loud)
 310			xfs_warn(mp, "SB buffer read failed");
 311		return EIO;
 312	}
 313	if (bp->b_error) {
 314		error = bp->b_error;
 315		if (loud)
 316			xfs_warn(mp, "SB validate failed with error %d.", error);
 317		/* bad CRC means corrupted metadata */
 318		if (error == EFSBADCRC)
 319			error = EFSCORRUPTED;
 320		goto release_buf;
 321	}
 322
 323	/*
 324	 * Initialize the mount structure from the superblock.
 325	 */
 326	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
 327	xfs_sb_quota_from_disk(&mp->m_sb);
 328
 329	/*
 330	 * We must be able to do sector-sized and sector-aligned IO.
 331	 */
 332	if (sector_size > sbp->sb_sectsize) {
 333		if (loud)
 334			xfs_warn(mp, "device supports %u byte sectors (not %u)",
 335				sector_size, sbp->sb_sectsize);
 336		error = ENOSYS;
 337		goto release_buf;
 338	}
 339
 340	/*
 341	 * Re-read the superblock so the buffer is correctly sized,
 342	 * and properly verified.
 343	 */
 344	if (buf_ops == NULL) {
 345		xfs_buf_relse(bp);
 346		sector_size = sbp->sb_sectsize;
 347		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
 348		goto reread;
 349	}
 350
 351	/* Initialize per-cpu counters */
 352	xfs_icsb_reinit_counters(mp);
 353
 354	/* no need to be quiet anymore, so reset the buf ops */
 355	bp->b_ops = &xfs_sb_buf_ops;
 356
 357	mp->m_sb_bp = bp;
 358	xfs_buf_unlock(bp);
 359	return 0;
 360
 361release_buf:
 362	xfs_buf_relse(bp);
 363	return error;
 364}
 365
 366/*
 367 * Update alignment values based on mount options and sb values
 368 */
 369STATIC int
 370xfs_update_alignment(xfs_mount_t *mp)
 371{
 372	xfs_sb_t	*sbp = &(mp->m_sb);
 373
 374	if (mp->m_dalign) {
 375		/*
 376		 * If stripe unit and stripe width are not multiples
 377		 * of the fs blocksize turn off alignment.
 378		 */
 379		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
 380		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
 381			xfs_warn(mp,
 382		"alignment check failed: sunit/swidth vs. blocksize(%d)",
 383				sbp->sb_blocksize);
 384			return XFS_ERROR(EINVAL);
 385		} else {
 386			/*
 387			 * Convert the stripe unit and width to FSBs.
 388			 */
 389			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
 390			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
 391				xfs_warn(mp,
 392			"alignment check failed: sunit/swidth vs. agsize(%d)",
 393					 sbp->sb_agblocks);
 394				return XFS_ERROR(EINVAL);
 395			} else if (mp->m_dalign) {
 396				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
 397			} else {
 398				xfs_warn(mp,
 399			"alignment check failed: sunit(%d) less than bsize(%d)",
 400					 mp->m_dalign, sbp->sb_blocksize);
 401				return XFS_ERROR(EINVAL);
 402			}
 403		}
 404
 405		/*
 406		 * Update superblock with new values
 407		 * and log changes
 408		 */
 409		if (xfs_sb_version_hasdalign(sbp)) {
 410			if (sbp->sb_unit != mp->m_dalign) {
 411				sbp->sb_unit = mp->m_dalign;
 412				mp->m_update_flags |= XFS_SB_UNIT;
 413			}
 414			if (sbp->sb_width != mp->m_swidth) {
 415				sbp->sb_width = mp->m_swidth;
 416				mp->m_update_flags |= XFS_SB_WIDTH;
 417			}
 418		} else {
 419			xfs_warn(mp,
 420	"cannot change alignment: superblock does not support data alignment");
 421			return XFS_ERROR(EINVAL);
 422		}
 423	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
 424		    xfs_sb_version_hasdalign(&mp->m_sb)) {
 425			mp->m_dalign = sbp->sb_unit;
 426			mp->m_swidth = sbp->sb_width;
 427	}
 428
 429	return 0;
 430}
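/*
 * Editor's note on the mask tests above: sb_blocksize is a power of
 * two, so m_blockmask == sb_blocksize - 1 and "bytes & m_blockmask"
 * is non-zero exactly when bytes is not a whole number of filesystem
 * blocks. A standalone sketch of the same test:
 */

static int is_block_multiple(unsigned long bytes, unsigned long blocksize)
{
	return (bytes & (blocksize - 1)) == 0;	/* blocksize must be 2^n */
}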
 431
 432/*
 433 * Set the maximum inode count for this filesystem
 434 */
 435STATIC void
 436xfs_set_maxicount(xfs_mount_t *mp)
 437{
 438	xfs_sb_t	*sbp = &(mp->m_sb);
 439	__uint64_t	icount;
 440
 441	if (sbp->sb_imax_pct) {
 442		/*
 443		 * Make sure the maximum inode count is a multiple
 444		 * of the units we allocate inodes in.
 445		 */
 446		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 447		do_div(icount, 100);
 448		do_div(icount, mp->m_ialloc_blks);
 449		mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
 450				   sbp->sb_inopblog;
 451	} else {
 452		mp->m_maxicount = 0;
 453	}
 454}
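/*
 * Editor's sketch (example values invented): the computation above
 * caps inodes at sb_imax_pct percent of the data blocks, rounds that
 * down to whole inode-allocation chunks, then converts blocks to
 * inodes. Equivalently, without the kernel's do_div() helper:
 */

#include <stdint.h>

static uint64_t max_icount(uint64_t dblocks, unsigned imax_pct,
			   unsigned ialloc_blks, unsigned inopblog)
{
	uint64_t icount = dblocks * imax_pct / 100;	/* blocks allowed */

	icount -= icount % ialloc_blks;		/* whole chunks only */
	return icount << inopblog;		/* blocks -> inodes */
}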
 455
 456/*
 457 * Set the default minimum read and write sizes unless
 458 * already specified in a mount option.
 459 * We use smaller I/O sizes when the file system
 460 * is being used for NFS service (wsync mount option).
 461 */
 462STATIC void
 463xfs_set_rw_sizes(xfs_mount_t *mp)
 464{
 465	xfs_sb_t	*sbp = &(mp->m_sb);
 466	int		readio_log, writeio_log;
 467
 468	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
 469		if (mp->m_flags & XFS_MOUNT_WSYNC) {
 470			readio_log = XFS_WSYNC_READIO_LOG;
 471			writeio_log = XFS_WSYNC_WRITEIO_LOG;
 472		} else {
 473			readio_log = XFS_READIO_LOG_LARGE;
 474			writeio_log = XFS_WRITEIO_LOG_LARGE;
 475		}
 476	} else {
 477		readio_log = mp->m_readio_log;
 478		writeio_log = mp->m_writeio_log;
 479	}
 480
 481	if (sbp->sb_blocklog > readio_log) {
 482		mp->m_readio_log = sbp->sb_blocklog;
 483	} else {
 484		mp->m_readio_log = readio_log;
 485	}
 486	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
 487	if (sbp->sb_blocklog > writeio_log) {
 488		mp->m_writeio_log = sbp->sb_blocklog;
 489	} else {
 490		mp->m_writeio_log = writeio_log;
 491	}
 492	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
 493}
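/*
 * Editor's example: with 4 KiB blocks (sb_blocklog == 12) and,
 * assuming the large default readio_log of 16 (64 KiB), the clamp
 * above leaves m_readio_log at 16 and m_readio_blocks becomes
 * 1 << (16 - 12) = 16 blocks. The clamp only bites when the block
 * size exceeds the requested I/O size, so the preferred I/O size
 * never drops below one filesystem block.
 */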
 494
 495/*
 496 * precalculate the low space thresholds for dynamic speculative preallocation.
 497 */
 498void
 499xfs_set_low_space_thresholds(
 500	struct xfs_mount	*mp)
 501{
 502	int i;
 503
 504	for (i = 0; i < XFS_LOWSP_MAX; i++) {
 505		__uint64_t space = mp->m_sb.sb_dblocks;
 506
 507		do_div(space, 100);
 508		mp->m_low_space[i] = space * (i + 1);
 509	}
 510}
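/*
 * Editor's example: the loop above precomputes thresholds at 1%, 2%,
 * ... of the data blocks. Assuming XFS_LOWSP_MAX is 5 and
 * sb_dblocks = 1000000, m_low_space becomes
 * { 10000, 20000, 30000, 40000, 50000 }.
 */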
 511
 512
 513/*
 514 * Set whether we're using inode alignment.
 515 */
 516STATIC void
 517xfs_set_inoalignment(xfs_mount_t *mp)
 518{
 519	if (xfs_sb_version_hasalign(&mp->m_sb) &&
 520	    mp->m_sb.sb_inoalignmt >=
 521	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
 522		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
 523	else
 524		mp->m_inoalign_mask = 0;
 525	/*
 526	 * If we are using stripe alignment, check whether
 527	 * the stripe unit is a multiple of the inode alignment
 528	 */
 529	if (mp->m_dalign && mp->m_inoalign_mask &&
 530	    !(mp->m_dalign & mp->m_inoalign_mask))
 531		mp->m_sinoalign = mp->m_dalign;
 532	else
 533		mp->m_sinoalign = 0;
 534}
 535
 536/*
 537 * Check that the data (and log if separate) is an ok size.
 538 */
 539STATIC int
 540xfs_check_sizes(xfs_mount_t *mp)
 541{
 542	xfs_buf_t	*bp;
 543	xfs_daddr_t	d;
 544
 545	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
 546	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
 547		xfs_warn(mp, "filesystem size mismatch detected");
 548		return XFS_ERROR(EFBIG);
 549	}
 550	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
 551					d - XFS_FSS_TO_BB(mp, 1),
 552					XFS_FSS_TO_BB(mp, 1), 0, NULL);
 553	if (!bp) {
 554		xfs_warn(mp, "last sector read failed");
 555		return EIO;
 556	}
 557	xfs_buf_relse(bp);
 558
 559	if (mp->m_logdev_targp != mp->m_ddev_targp) {
 560		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
 561		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
 562			xfs_warn(mp, "log size mismatch detected");
 563			return XFS_ERROR(EFBIG);
 564		}
 565		bp = xfs_buf_read_uncached(mp->m_logdev_targp,
 566					d - XFS_FSB_TO_BB(mp, 1),
 567					XFS_FSB_TO_BB(mp, 1), 0, NULL);
 568		if (!bp) {
 569			xfs_warn(mp, "log device read failed");
 570			return EIO;
 571		}
 572		xfs_buf_relse(bp);
 573	}
 574	return 0;
 575}
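/*
 * Editor's note: the uncached reads above deliberately target the
 * last addressable sector (or log block) implied by the superblock's
 * block counts. A device that is smaller than the superblock claims
 * fails this cheap probe at mount time instead of failing much later
 * during normal operation.
 */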
 576
 577/*
 578 * Clear the quotaflags in memory and in the superblock.
 579 */
 580int
 581xfs_mount_reset_sbqflags(
 582	struct xfs_mount	*mp)
 583{
 584	int			error;
 585	struct xfs_trans	*tp;
 586
 587	mp->m_qflags = 0;
 588
 589	/*
 590	 * It is OK to look at sb_qflags here in mount path,
 591	 * without m_sb_lock.
 592	 */
 593	if (mp->m_sb.sb_qflags == 0)
 594		return 0;
 595	spin_lock(&mp->m_sb_lock);
 596	mp->m_sb.sb_qflags = 0;
 597	spin_unlock(&mp->m_sb_lock);
 598
 599	/*
 600	 * If the fs is readonly, let the incore superblock run
 601	 * with quotas off but don't flush the update out to disk
 602	 */
 603	if (mp->m_flags & XFS_MOUNT_RDONLY)
 604		return 0;
 605
 606	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
 607	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
 608	if (error) {
 609		xfs_trans_cancel(tp, 0);
 610		xfs_alert(mp, "%s: Superblock update failed!", __func__);
 611		return error;
 612	}
 613
 614	xfs_mod_sb(tp, XFS_SB_QFLAGS);
 615	return xfs_trans_commit(tp, 0);
 616}
 617
 618__uint64_t
 619xfs_default_resblks(xfs_mount_t *mp)
 620{
 621	__uint64_t resblks;
 622
 623	/*
 624	 * We default to 5% or 8192 fsbs of space reserved, whichever is
 625	 * smaller.  This is intended to cover concurrent allocation
 626	 * transactions when we initially hit enospc. These each require a 4
 627	 * block reservation. Hence by default we cover roughly 2000 concurrent
 628	 * allocation reservations.
 629	 */
 630	resblks = mp->m_sb.sb_dblocks;
 631	do_div(resblks, 20);
 632	resblks = min_t(__uint64_t, resblks, 8192);
 633	return resblks;
 634}
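/*
 * Editor's example of the arithmetic above: a filesystem with 100000
 * data blocks reserves min(100000 / 20, 8192) = 5000 blocks; any
 * filesystem larger than 163840 data blocks caps out at the
 * 8192-block reserve pool.
 */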
 635
 636/*
 637 * This function does the following on an initial mount of a file system:
 638 *	- reads the superblock from disk and init the mount struct
 639 *	- if we're a 32-bit kernel, do a size check on the superblock
 640 *		so we don't mount terabyte filesystems
 641 *	- init mount struct realtime fields
 642 *	- allocate inode hash table for fs
 643 *	- init directory manager
 644 *	- perform recovery and init the log manager
 645 */
 646int
 647xfs_mountfs(
 648	xfs_mount_t	*mp)
 649{
 650	xfs_sb_t	*sbp = &(mp->m_sb);
 651	xfs_inode_t	*rip;
 652	__uint64_t	resblks;
 653	uint		quotamount = 0;
 654	uint		quotaflags = 0;
 655	int		error = 0;
 656
 657	xfs_sb_mount_common(mp, sbp);
 658
 659	/*
 660	 * Check for mismatched features2 values.  Older kernels
 661	 * read & wrote into the wrong sb offset for sb_features2
 662	 * on some platforms due to xfs_sb_t not being 64bit size aligned
 663	 * when sb_features2 was added, which made older superblock
 664	 * reading/writing routines swap it as a 64-bit value.
 665	 *
 666	 * For backwards compatibility, we make both slots equal.
 667	 *
 668	 * If we detect a mismatched field, we OR the set bits into the
 669	 * existing features2 field in case it has already been modified; we
 670	 * don't want to lose any features.  We then update the bad location
 671	 * with the ORed value so that older kernels will see any features2
 672	 * flags, and mark the two fields as needing updates once the
 673	 * transaction subsystem is online.
 674	 */
 675	if (xfs_sb_has_mismatched_features2(sbp)) {
 676		xfs_warn(mp, "correcting sb_features alignment problem");
 677		sbp->sb_features2 |= sbp->sb_bad_features2;
 678		sbp->sb_bad_features2 = sbp->sb_features2;
 679		mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
 680
 681		/*
 682		 * Re-check for ATTR2 in case it was found in bad_features2
 683		 * slot.
 684		 */
 685		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
 686		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
 687			mp->m_flags |= XFS_MOUNT_ATTR2;
 688	}
 689
 690	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
 691	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
 692		xfs_sb_version_removeattr2(&mp->m_sb);
 693		mp->m_update_flags |= XFS_SB_FEATURES2;
 694
 695		/* update sb_versionnum for the clearing of the morebits */
 696		if (!sbp->sb_features2)
 697			mp->m_update_flags |= XFS_SB_VERSIONNUM;
 698	}
 699
 700	/*
 701	 * Check if sb_agblocks is aligned at stripe boundary
 702	 * If sb_agblocks is NOT aligned turn off m_dalign since
 703	 * allocator alignment is within an ag, therefore ag has
 704	 * to be aligned at stripe boundary.
 705	 */
 706	error = xfs_update_alignment(mp);
 707	if (error)
 708		goto out;
 709
 710	xfs_alloc_compute_maxlevels(mp);
 711	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
 712	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
 713	xfs_ialloc_compute_maxlevels(mp);
 714
 715	xfs_set_maxicount(mp);
 716
 717	error = xfs_uuid_mount(mp);
 718	if (error)
 719		goto out;
 720
 721	/*
 722	 * Set the minimum read and write sizes
 723	 */
 724	xfs_set_rw_sizes(mp);
 725
 726	/* set the low space thresholds for dynamic preallocation */
 727	xfs_set_low_space_thresholds(mp);
 728
 729	/*
 730	 * Set the inode cluster size.
 731	 * This may still be overridden by the file system
 732	 * block size if it is larger than the chosen cluster size.
 733	 *
 734	 * For v5 filesystems, scale the cluster size with the inode size to
 735	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
 736	 * has set the inode alignment value appropriately for larger cluster
 737	 * sizes.
 738	 */
 739	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
 740	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 741		int	new_size = mp->m_inode_cluster_size;
 742
 743		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
 744		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
 745			mp->m_inode_cluster_size = new_size;
 746	}
 747
 748	/*
 749	 * Set inode alignment fields
 750	 */
 751	xfs_set_inoalignment(mp);
 752
 753	/*
 754	 * Check that the data (and log if separate) is an ok size.
 755	 */
 756	error = xfs_check_sizes(mp);
 757	if (error)
 758		goto out_remove_uuid;
 759
 760	/*
 761	 * Initialize realtime fields in the mount structure
 762	 */
 763	error = xfs_rtmount_init(mp);
 764	if (error) {
 765		xfs_warn(mp, "RT mount failed");
 766		goto out_remove_uuid;
 767	}
 768
 769	/*
 770	 *  Copies the low order bits of the timestamp and the randomly
 771	 *  set "sequence" number out of a UUID.
 772	 */
 773	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
 774
 775	mp->m_dmevmask = 0;	/* not persistent; set after each mount */
 776
 777	xfs_dir_mount(mp);
 778
 779	/*
 780	 * Initialize the attribute manager's entries.
 781	 */
 782	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
 783
 784	/*
 785	 * Initialize the precomputed transaction reservations values.
 786	 */
 787	xfs_trans_init(mp);
 788
 789	/*
 790	 * Allocate and initialize the per-ag data.
 791	 */
 792	spin_lock_init(&mp->m_perag_lock);
 793	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
 794	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
 795	if (error) {
 796		xfs_warn(mp, "Failed per-ag init: %d", error);
 797		goto out_remove_uuid;
 798	}
 799
 800	if (!sbp->sb_logblocks) {
 801		xfs_warn(mp, "no log defined");
 802		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
 803		error = XFS_ERROR(EFSCORRUPTED);
 804		goto out_free_perag;
 805	}
 806
 807	/*
 808	 * log's mount-time initialization. Perform 1st part recovery if needed
 809	 */
 810	error = xfs_log_mount(mp, mp->m_logdev_targp,
 811			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
 812			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
 813	if (error) {
 814		xfs_warn(mp, "log mount failed");
 815		goto out_fail_wait;
 816	}
 817
 818	/*
 819	 * Now the log is mounted, we know if it was an unclean shutdown or
 820	 * not. If it was, the first phase of recovery has completed, so we
 821	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
 822	 * but they are recovered transactionally in the second recovery phase
 823	 * later.
 824	 *
 825	 * Hence we can safely re-initialise incore superblock counters from
 826	 * the per-ag data. These may not be correct if the filesystem was not
 827	 * cleanly unmounted, so we need to wait for recovery to finish before
 828	 * doing this.
 829	 *
 830	 * If the filesystem was cleanly unmounted, then we can trust the
 831	 * values in the superblock to be correct and we don't need to do
 832	 * anything here.
 833	 *
 834	 * If we are currently making the filesystem, the initialisation will
 835	 * fail as the perag data is in an undefined state.
 836	 */
 837	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
 838	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
 839	     !mp->m_sb.sb_inprogress) {
 840		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
 841		if (error)
 842			goto out_fail_wait;
 843	}
 844
 845	/*
 846	 * Get and sanity-check the root inode.
 847	 * Save the pointer to it in the mount structure.
 848	 */
 849	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
 850	if (error) {
 851		xfs_warn(mp, "failed to read root inode");
 852		goto out_log_dealloc;
 853	}
 854
 855	ASSERT(rip != NULL);
 856
 857	if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
 858		xfs_warn(mp, "corrupted root inode %llu: not a directory",
 859			(unsigned long long)rip->i_ino);
 860		xfs_iunlock(rip, XFS_ILOCK_EXCL);
 861		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
 862				 mp);
 863		error = XFS_ERROR(EFSCORRUPTED);
 864		goto out_rele_rip;
 865	}
 866	mp->m_rootip = rip;	/* save it */
 867
 868	xfs_iunlock(rip, XFS_ILOCK_EXCL);
 869
 870	/*
 871	 * Initialize realtime inode pointers in the mount structure
 872	 */
 873	error = xfs_rtmount_inodes(mp);
 874	if (error) {
 875		/*
 876		 * Free up the root inode.
 877		 */
 878		xfs_warn(mp, "failed to read RT inodes");
 879		goto out_rele_rip;
 880	}
 881
 882	/*
 883	 * If this is a read-only mount, defer the superblock updates until
 884	 * the next remount into writeable mode.  Otherwise we would never
 885	 * perform the update e.g. for the root filesystem.
 886	 */
 887	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
 888		error = xfs_mount_log_sb(mp, mp->m_update_flags);
 889		if (error) {
 890			xfs_warn(mp, "failed to write sb changes");
 891			goto out_rtunmount;
 892		}
 893	}
 894
 895	/*
 896	 * Initialise the XFS quota management subsystem for this mount
 897	 */
 898	if (XFS_IS_QUOTA_RUNNING(mp)) {
 899		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
 900		if (error)
 901			goto out_rtunmount;
 902	} else {
 903		ASSERT(!XFS_IS_QUOTA_ON(mp));
 904
 905		/*
 906		 * If a file system had quotas running earlier, but decided to
 907		 * mount without -o uquota/pquota/gquota options, revoke the
 908		 * quotachecked license.
 909		 */
 910		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
 911			xfs_notice(mp, "resetting quota flags");
 912			error = xfs_mount_reset_sbqflags(mp);
 913			if (error)
 914				return error;
 915		}
 916	}
 917
 918	/*
 919	 * Finish recovering the file system.  This part needed to be
 920	 * delayed until after the root and real-time bitmap inodes
 921	 * were consistently read in.
 922	 */
 923	error = xfs_log_mount_finish(mp);
 924	if (error) {
 925		xfs_warn(mp, "log mount finish failed");
 926		goto out_rtunmount;
 927	}
 928
 929	/*
 930	 * Complete the quota initialisation, post-log-replay component.
 931	 */
 932	if (quotamount) {
 933		ASSERT(mp->m_qflags == 0);
 934		mp->m_qflags = quotaflags;
 935
 936		xfs_qm_mount_quotas(mp);
 937	}
 938
 939	/*
 940	 * Now we are mounted, reserve a small amount of unused space for
 941	 * privileged transactions. This is needed so that transaction
 942	 * space required for critical operations can dip into this pool
 943	 * when at ENOSPC. This is needed for operations like create with
 944	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
 945	 * are not allowed to use this reserved space.
 946	 *
 947	 * This may drive us straight to ENOSPC on mount, but that implies
 948	 * we were already there on the last unmount. Warn if this occurs.
 949	 */
 950	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
 951		resblks = xfs_default_resblks(mp);
 952		error = xfs_reserve_blocks(mp, &resblks, NULL);
 953		if (error)
 954			xfs_warn(mp,
 955	"Unable to allocate reserve blocks. Continuing without reserve pool.");
 956	}
 957
 958	return 0;
 959
 960 out_rtunmount:
 961	xfs_rtunmount_inodes(mp);
 962 out_rele_rip:
 963	IRELE(rip);
 964 out_log_dealloc:
 965	xfs_log_unmount(mp);
 966 out_fail_wait:
 967	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
 968		xfs_wait_buftarg(mp->m_logdev_targp);
 969	xfs_wait_buftarg(mp->m_ddev_targp);
 970 out_free_perag:
 971	xfs_free_perag(mp);
 972 out_remove_uuid:
 973	xfs_uuid_unmount(mp);
 974 out:
 975	return error;
 976}
 977
 978/*
 979 * This flushes out the inodes,dquots and the superblock, unmounts the
 980 * log and makes sure that incore structures are freed.
 981 */
 982void
 983xfs_unmountfs(
 984	struct xfs_mount	*mp)
 985{
 986	__uint64_t		resblks;
 987	int			error;
 988
 989	cancel_delayed_work_sync(&mp->m_eofblocks_work);
 990
 991	xfs_qm_unmount_quotas(mp);
 992	xfs_rtunmount_inodes(mp);
 993	IRELE(mp->m_rootip);
 994
 995	/*
 996	 * We can potentially deadlock here if we have an inode cluster
 997	 * that has been freed but has its buffer still pinned in memory
 998	 * because the transaction is still sitting in an iclog. The stale
 999	 * inodes on that buffer will have their flush locks held until the
1000	 * transaction hits the disk and the callbacks run. The inode
1001	 * flush takes the flush lock unconditionally and with nothing to
1002	 * push out the iclog we will never get that unlocked. Hence we
1003	 * need to force the log first.
1004	 */
1005	xfs_log_force(mp, XFS_LOG_SYNC);
1006
1007	/*
1008	 * Flush all pending changes from the AIL.
1009	 */
1010	xfs_ail_push_all_sync(mp->m_ail);
1011
1012	/*
1013	 * And reclaim all inodes.  At this point there should be no dirty
1014	 * inodes and none should be pinned or locked, but use synchronous
1015	 * reclaim just to be sure. We can stop background inode reclaim
1016	 * here as well if it is still running.
1017	 */
1018	cancel_delayed_work_sync(&mp->m_reclaim_work);
1019	xfs_reclaim_inodes(mp, SYNC_WAIT);
1020
1021	xfs_qm_unmount(mp);
1022
1023	/*
1024	 * Unreserve any blocks we have so that when we unmount we don't account
1025	 * the reserved free space as used. This is really only necessary for
1026	 * lazy superblock counting because it trusts the incore superblock
1027	 * counters to be absolutely correct on clean unmount.
1028	 *
1029	 * We don't bother correcting this elsewhere for lazy superblock
1030	 * counting because on mount of an unclean filesystem we reconstruct the
1031	 * correct counter value and this is irrelevant.
1032	 *
1033	 * For non-lazy counter filesystems, this doesn't matter at all because
1034	 * we only ever apply deltas to the superblock and hence the incore
1035	 * value does not matter....
1036	 */
1037	resblks = 0;
1038	error = xfs_reserve_blocks(mp, &resblks, NULL);
1039	if (error)
1040		xfs_warn(mp, "Unable to free reserved block pool. "
1041				"Freespace may not be correct on next mount.");
1042
1043	error = xfs_log_sbcount(mp);
1044	if (error)
1045		xfs_warn(mp, "Unable to update superblock counters. "
1046				"Freespace may not be correct on next mount.");
1047
1048	xfs_log_unmount(mp);
1049	xfs_uuid_unmount(mp);
1050
1051#if defined(DEBUG)
1052	xfs_errortag_clearall(mp, 0);
1053#endif
1054	xfs_free_perag(mp);
1055}
1056
1057int
1058xfs_fs_writable(xfs_mount_t *mp)
1059{
1060	return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
1061		(mp->m_flags & XFS_MOUNT_RDONLY));
1062}
1063
1064/*
1065 * xfs_log_sbcount
1066 *
1067 * Sync the superblock counters to disk.
1068 *
1069 * Note this code can be called during the process of freezing, so
1070 * we may need to use the transaction allocator which does not
1071 * block when the transaction subsystem is in its frozen state.
1072 */
1073int
1074xfs_log_sbcount(xfs_mount_t *mp)
1075{
1076	xfs_trans_t	*tp;
1077	int		error;
1078
1079	if (!xfs_fs_writable(mp))
1080		return 0;
1081
1082	xfs_icsb_sync_counters(mp, 0);
1083
1084	/*
1085	 * we don't need to do this if we are updating the superblock
1086	 * counters on every modification.
1087	 */
1088	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1089		return 0;
1090
1091	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
1092	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
1093	if (error) {
1094		xfs_trans_cancel(tp, 0);
1095		return error;
1096	}
1097
1098	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1099	xfs_trans_set_sync(tp);
1100	error = xfs_trans_commit(tp, 0);
1101	return error;
1102}
1103
1104/*
1105 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
1106 * a delta to a specified field in the in-core superblock.  Simply
1107 * switch on the field indicated and apply the delta to that field.
1108 * Fields are not allowed to dip below zero, so if the delta would
1109 * do this do not apply it and return EINVAL.
1110 *
1111 * The m_sb_lock must be held when this routine is called.
1112 */
1113STATIC int
1114xfs_mod_incore_sb_unlocked(
1115	xfs_mount_t	*mp,
1116	xfs_sb_field_t	field,
1117	int64_t		delta,
1118	int		rsvd)
1119{
1120	int		scounter;	/* short counter for 32 bit fields */
1121	long long	lcounter;	/* long counter for 64 bit fields */
1122	long long	res_used, rem;
1123
1124	/*
1125	 * With the in-core superblock spin lock held, switch
1126	 * on the indicated field.  Apply the delta to the
1127	 * proper field.  If the fields value would dip below
1128	 * 0, then do not apply the delta and return EINVAL.
1129	 */
1130	switch (field) {
1131	case XFS_SBS_ICOUNT:
1132		lcounter = (long long)mp->m_sb.sb_icount;
1133		lcounter += delta;
1134		if (lcounter < 0) {
1135			ASSERT(0);
1136			return XFS_ERROR(EINVAL);
1137		}
1138		mp->m_sb.sb_icount = lcounter;
1139		return 0;
1140	case XFS_SBS_IFREE:
1141		lcounter = (long long)mp->m_sb.sb_ifree;
1142		lcounter += delta;
1143		if (lcounter < 0) {
1144			ASSERT(0);
1145			return XFS_ERROR(EINVAL);
1146		}
1147		mp->m_sb.sb_ifree = lcounter;
1148		return 0;
1149	case XFS_SBS_FDBLOCKS:
1150		lcounter = (long long)
1151			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1152		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1153
1154		if (delta > 0) {		/* Putting blocks back */
1155			if (res_used > delta) {
1156				mp->m_resblks_avail += delta;
1157			} else {
1158				rem = delta - res_used;
1159				mp->m_resblks_avail = mp->m_resblks;
1160				lcounter += rem;
1161			}
1162		} else {				/* Taking blocks away */
1163			lcounter += delta;
1164			if (lcounter >= 0) {
1165				mp->m_sb.sb_fdblocks = lcounter +
1166							XFS_ALLOC_SET_ASIDE(mp);
1167				return 0;
1168			}
1169
1170			/*
1171			 * We are out of blocks, use any available reserved
1172			 * blocks if were allowed to.
1173			 */
1174			if (!rsvd)
1175				return XFS_ERROR(ENOSPC);
1176
1177			lcounter = (long long)mp->m_resblks_avail + delta;
1178			if (lcounter >= 0) {
1179				mp->m_resblks_avail = lcounter;
1180				return 0;
1181			}
1182			printk_once(KERN_WARNING
1183				"Filesystem \"%s\": reserve blocks depleted! "
1184				"Consider increasing reserve pool size.",
1185				mp->m_fsname);
1186			return XFS_ERROR(ENOSPC);
1187		}
1188
1189		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1190		return 0;
1191	case XFS_SBS_FREXTENTS:
1192		lcounter = (long long)mp->m_sb.sb_frextents;
1193		lcounter += delta;
1194		if (lcounter < 0) {
1195			return XFS_ERROR(ENOSPC);
1196		}
1197		mp->m_sb.sb_frextents = lcounter;
1198		return 0;
1199	case XFS_SBS_DBLOCKS:
1200		lcounter = (long long)mp->m_sb.sb_dblocks;
1201		lcounter += delta;
1202		if (lcounter < 0) {
1203			ASSERT(0);
1204			return XFS_ERROR(EINVAL);
1205		}
1206		mp->m_sb.sb_dblocks = lcounter;
1207		return 0;
1208	case XFS_SBS_AGCOUNT:
1209		scounter = mp->m_sb.sb_agcount;
1210		scounter += delta;
1211		if (scounter < 0) {
1212			ASSERT(0);
1213			return XFS_ERROR(EINVAL);
1214		}
1215		mp->m_sb.sb_agcount = scounter;
1216		return 0;
1217	case XFS_SBS_IMAX_PCT:
1218		scounter = mp->m_sb.sb_imax_pct;
1219		scounter += delta;
1220		if (scounter < 0) {
1221			ASSERT(0);
1222			return XFS_ERROR(EINVAL);
1223		}
1224		mp->m_sb.sb_imax_pct = scounter;
1225		return 0;
1226	case XFS_SBS_REXTSIZE:
1227		scounter = mp->m_sb.sb_rextsize;
1228		scounter += delta;
1229		if (scounter < 0) {
1230			ASSERT(0);
1231			return XFS_ERROR(EINVAL);
1232		}
1233		mp->m_sb.sb_rextsize = scounter;
1234		return 0;
1235	case XFS_SBS_RBMBLOCKS:
1236		scounter = mp->m_sb.sb_rbmblocks;
1237		scounter += delta;
1238		if (scounter < 0) {
1239			ASSERT(0);
1240			return XFS_ERROR(EINVAL);
1241		}
1242		mp->m_sb.sb_rbmblocks = scounter;
1243		return 0;
1244	case XFS_SBS_RBLOCKS:
1245		lcounter = (long long)mp->m_sb.sb_rblocks;
1246		lcounter += delta;
1247		if (lcounter < 0) {
1248			ASSERT(0);
1249			return XFS_ERROR(EINVAL);
1250		}
1251		mp->m_sb.sb_rblocks = lcounter;
1252		return 0;
1253	case XFS_SBS_REXTENTS:
1254		lcounter = (long long)mp->m_sb.sb_rextents;
1255		lcounter += delta;
1256		if (lcounter < 0) {
1257			ASSERT(0);
1258			return XFS_ERROR(EINVAL);
1259		}
1260		mp->m_sb.sb_rextents = lcounter;
1261		return 0;
1262	case XFS_SBS_REXTSLOG:
1263		scounter = mp->m_sb.sb_rextslog;
1264		scounter += delta;
1265		if (scounter < 0) {
1266			ASSERT(0);
1267			return XFS_ERROR(EINVAL);
1268		}
1269		mp->m_sb.sb_rextslog = scounter;
1270		return 0;
1271	default:
1272		ASSERT(0);
1273		return XFS_ERROR(EINVAL);
1274	}
1275}
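/*
 * Editor's sketch (not kernel code) of the XFS_SBS_FDBLOCKS case
 * above: freed blocks refill the reserve pool before they count as
 * free, and an allocation that would drive the free count negative
 * may take its whole delta from the pool when "rsvd" is set. Field
 * names below are invented for the illustration.
 */

struct pool_counters {
	long long free;		/* free blocks outside the reserve pool */
	long long resblks;	/* configured reserve pool size */
	long long resavail;	/* reserve pool blocks still available */
};

static int mod_fdblocks(struct pool_counters *c, long long delta, int rsvd)
{
	if (delta > 0) {			/* putting blocks back */
		long long used = c->resblks - c->resavail;

		if (used > delta) {
			c->resavail += delta;
		} else {
			c->resavail = c->resblks;
			c->free += delta - used;
		}
		return 0;
	}
	if (c->free + delta >= 0) {		/* normal allocation */
		c->free += delta;
		return 0;
	}
	if (!rsvd)
		return -1;			/* ENOSPC */
	if (c->resavail + delta >= 0) {		/* dip into the pool */
		c->resavail += delta;
		return 0;
	}
	return -1;				/* pool depleted: ENOSPC */
}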
1276
1277/*
1278 * xfs_mod_incore_sb() is used to change a field in the in-core
1279 * superblock structure by the specified delta.  This modification
1280 * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
1281 * routine to do the work.
1282 */
1283int
1284xfs_mod_incore_sb(
1285	struct xfs_mount	*mp,
1286	xfs_sb_field_t		field,
1287	int64_t			delta,
1288	int			rsvd)
1289{
1290	int			status;
1291
1292#ifdef HAVE_PERCPU_SB
1293	ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
1294#endif
1295	spin_lock(&mp->m_sb_lock);
1296	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1297	spin_unlock(&mp->m_sb_lock);
1298
1299	return status;
1300}
1301
1302/*
1303 * Change more than one field in the in-core superblock structure at a time.
1304 *
1305 * The fields and changes to those fields are specified in the array of
1306 * xfs_mod_sb structures passed in.  Either all of the specified deltas
1307 * will be applied or none of them will.  If any modified field dips below 0,
1308 * then all modifications will be backed out and EINVAL will be returned.
1309 *
1310 * Note that this function may not be used for the superblock values that
1311 * are tracked with the in-memory per-cpu counters - a direct call to
1312 * xfs_icsb_modify_counters is required for these.
1313 */
1314int
1315xfs_mod_incore_sb_batch(
1316	struct xfs_mount	*mp,
1317	xfs_mod_sb_t		*msb,
1318	uint			nmsb,
1319	int			rsvd)
1320{
1321	xfs_mod_sb_t		*msbp;
1322	int			error = 0;
1323
1324	/*
1325	 * Loop through the array of mod structures and apply each individually.
1326	 * If any fail, then back out all those which have already been applied.
1327	 * Do all of this within the scope of the m_sb_lock so that all of the
1328	 * changes will be atomic.
1329	 */
1330	spin_lock(&mp->m_sb_lock);
1331	for (msbp = msb; msbp < (msb + nmsb); msbp++) {
1332		ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
1333		       msbp->msb_field > XFS_SBS_FDBLOCKS);
1334
1335		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1336						   msbp->msb_delta, rsvd);
1337		if (error)
1338			goto unwind;
1339	}
1340	spin_unlock(&mp->m_sb_lock);
1341	return 0;
1342
1343unwind:
1344	while (--msbp >= msb) {
1345		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1346						   -msbp->msb_delta, rsvd);
1347		ASSERT(error == 0);
1348	}
1349	spin_unlock(&mp->m_sb_lock);
1350	return error;
1351}
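/*
 * Editor's sketch of the all-or-nothing pattern above, with an
 * invented apply() callback: deltas are applied in order, and the
 * ones already applied are reversed on the first failure. In the
 * real code the whole sequence runs under m_sb_lock so it is atomic.
 */

static int apply_batch(const long long *deltas, int n,
		       int (*apply)(long long delta))
{
	int i, error = 0;

	for (i = 0; i < n; i++) {
		error = apply(deltas[i]);
		if (error)
			break;
	}
	while (error && --i >= 0)
		apply(-deltas[i]);		/* undo what succeeded */
	return error;
}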
1352
1353/*
1354 * xfs_getsb() is called to obtain the buffer for the superblock.
1355 * The buffer is returned locked and read in from disk.
1356 * The buffer should be released with a call to xfs_buf_relse().
1357 *
1358 * If the flags parameter is XBF_TRYLOCK, then we'll only return
1359 * the superblock buffer if it can be locked without sleeping.
1360 * If it can't then we'll return NULL.
1361 */
1362struct xfs_buf *
1363xfs_getsb(
1364	struct xfs_mount	*mp,
1365	int			flags)
1366{
1367	struct xfs_buf		*bp = mp->m_sb_bp;
1368
1369	if (!xfs_buf_trylock(bp)) {
1370		if (flags & XBF_TRYLOCK)
1371			return NULL;
1372		xfs_buf_lock(bp);
1373	}
1374
1375	xfs_buf_hold(bp);
1376	ASSERT(XFS_BUF_ISDONE(bp));
1377	return bp;
1378}
1379
1380/*
1381 * Used to free the superblock along various error paths.
1382 */
1383void
1384xfs_freesb(
1385	struct xfs_mount	*mp)
1386{
1387	struct xfs_buf		*bp = mp->m_sb_bp;
1388
1389	xfs_buf_lock(bp);
1390	mp->m_sb_bp = NULL;
1391	xfs_buf_relse(bp);
1392}
1393
1394/*
1395 * Used to log changes to the superblock unit and width fields which could
1396 * be altered by the mount options, as well as any potential sb_features2
1397 * fixup. Only the first superblock is updated.
1398 */
1399int
1400xfs_mount_log_sb(
1401	xfs_mount_t	*mp,
1402	__int64_t	fields)
1403{
1404	xfs_trans_t	*tp;
1405	int		error;
1406
1407	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
1408			 XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
1409			 XFS_SB_VERSIONNUM));
1410
1411	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
1412	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
1413	if (error) {
1414		xfs_trans_cancel(tp, 0);
1415		return error;
1416	}
1417	xfs_mod_sb(tp, fields);
1418	error = xfs_trans_commit(tp, 0);
1419	return error;
1420}
1421
1422/*
1423 * If the underlying (data/log/rt) device is readonly, there are some
1424 * operations that cannot proceed.
1425 */
1426int
1427xfs_dev_is_read_only(
1428	struct xfs_mount	*mp,
1429	char			*message)
1430{
1431	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1432	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
1433	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
1434		xfs_notice(mp, "%s required on read-only device.", message);
1435		xfs_notice(mp, "write access unavailable, cannot proceed.");
1436		return EROFS;
1437	}
1438	return 0;
1439}
1440
1441#ifdef HAVE_PERCPU_SB
1442/*
1443 * Per-cpu incore superblock counters
1444 *
1445 * Simple concept, difficult implementation
1446 *
1447 * Basically, replace the incore superblock counters with a distributed per cpu
1448 * counter for contended fields (e.g.  free block count).
1449 *
1450 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
1451 * hence needs to be accurately read when we are running low on space. Hence
1452 * there is a method to enable and disable the per-cpu counters based on how
1453 * much "stuff" is available in them.
1454 *
1455 * Basically, a counter is enabled if there is enough free resource to justify
1456 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
1457 * ENOSPC), then we disable the counters to synchronise all callers and
1458 * re-distribute the available resources.
1459 *
1460 * If, once we redistributed the available resources, we still get a failure,
1461 * we disable the per-cpu counter and go through the slow path.
1462 *
1463 * The slow path is the current xfs_mod_incore_sb() function.  This means that
1464 * when we disable a per-cpu counter, we need to drain its resources back to
1465 * the global superblock. We do this after disabling the counter to prevent
1466 * more threads from queueing up on the counter.
1467 *
1468 * Essentially, this means that we still need a lock in the fast path to enable
1469 * synchronisation between the global counters and the per-cpu counters. This
1470 * is not a problem because the lock will be local to a CPU almost all the time
1471 * and have little contention except when we get to ENOSPC conditions.
1472 *
1473 * Basically, this lock becomes a barrier that enables us to lock out the fast
1474 * path while we do things like enabling and disabling counters and
1475 * synchronising the counters.
1476 *
1477 * Locking rules:
1478 *
1479 * 	1. m_sb_lock before picking up per-cpu locks
1480 * 	2. per-cpu locks always picked up via for_each_online_cpu() order
1481 * 	3. accurate counter sync requires m_sb_lock + per cpu locks
1482 * 	4. modifying per-cpu counters requires holding per-cpu lock
1483 * 	5. modifying global counters requires holding m_sb_lock
1484 *	6. enabling or disabling a counter requires holding the m_sb_lock 
1485 *	   and _none_ of the per-cpu locks.
1486 *
1487 * Disabled counters are only ever re-enabled by a balance operation
1488 * that results in more free resources per CPU than a given threshold.
1489 * To ensure counters don't remain disabled, they are rebalanced when
1490 * the global resource goes above a higher threshold (i.e. some hysteresis
1491 * is present to prevent thrashing).
1492 */
1493
1494#ifdef CONFIG_HOTPLUG_CPU
1495/*
1496 * hot-plug CPU notifier support.
1497 *
1498 * We need a notifier per filesystem as we need to be able to identify
1499 * the filesystem to balance the counters out. This is achieved by
1500 * having a notifier block embedded in the xfs_mount_t and doing pointer
1501 * magic to get the mount pointer from the notifier block address.
1502 */
1503STATIC int
1504xfs_icsb_cpu_notify(
1505	struct notifier_block *nfb,
1506	unsigned long action,
1507	void *hcpu)
1508{
1509	xfs_icsb_cnts_t *cntp;
1510	xfs_mount_t	*mp;
1511
1512	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
1513	cntp = (xfs_icsb_cnts_t *)
1514			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
1515	switch (action) {
1516	case CPU_UP_PREPARE:
1517	case CPU_UP_PREPARE_FROZEN:
1518		/* Easy Case - initialize the area and locks, and
1519		 * then rebalance when online does everything else for us. */
1520		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1521		break;
1522	case CPU_ONLINE:
1523	case CPU_ONLINE_FROZEN:
1524		xfs_icsb_lock(mp);
1525		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1526		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1527		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1528		xfs_icsb_unlock(mp);
1529		break;
1530	case CPU_DEAD:
1531	case CPU_DEAD_FROZEN:
1532		/* Disable all the counters, then fold the dead cpu's
1533		 * count into the total on the global superblock and
1534		 * re-enable the counters. */
1535		xfs_icsb_lock(mp);
1536		spin_lock(&mp->m_sb_lock);
1537		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
1538		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
1539		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
1540
1541		mp->m_sb.sb_icount += cntp->icsb_icount;
1542		mp->m_sb.sb_ifree += cntp->icsb_ifree;
1543		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
1544
1545		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1546
1547		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
1548		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
1549		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
1550		spin_unlock(&mp->m_sb_lock);
1551		xfs_icsb_unlock(mp);
1552		break;
1553	}
1554
1555	return NOTIFY_OK;
1556}
1557#endif /* CONFIG_HOTPLUG_CPU */
1558
1559int
1560xfs_icsb_init_counters(
1561	xfs_mount_t	*mp)
1562{
1563	xfs_icsb_cnts_t *cntp;
1564	int		i;
1565
1566	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
1567	if (mp->m_sb_cnts == NULL)
1568		return -ENOMEM;
1569
1570	for_each_online_cpu(i) {
1571		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1572		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1573	}
1574
1575	mutex_init(&mp->m_icsb_mutex);
1576
1577	/*
1578	 * start with all counters disabled so that the
1579	 * initial balance kicks us off correctly
1580	 */
1581	mp->m_icsb_counters = -1;
1582
1583#ifdef CONFIG_HOTPLUG_CPU
1584	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
1585	mp->m_icsb_notifier.priority = 0;
1586	register_hotcpu_notifier(&mp->m_icsb_notifier);
1587#endif /* CONFIG_HOTPLUG_CPU */
1588
1589	return 0;
1590}
1591
1592void
1593xfs_icsb_reinit_counters(
1594	xfs_mount_t	*mp)
1595{
1596	xfs_icsb_lock(mp);
1597	/*
1598	 * start with all counters disabled so that the
1599	 * initial balance kicks us off correctly
1600	 */
1601	mp->m_icsb_counters = -1;
1602	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1603	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1604	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1605	xfs_icsb_unlock(mp);
1606}
1607
1608void
1609xfs_icsb_destroy_counters(
1610	xfs_mount_t	*mp)
1611{
1612	if (mp->m_sb_cnts) {
1613		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
1614		free_percpu(mp->m_sb_cnts);
1615	}
1616	mutex_destroy(&mp->m_icsb_mutex);
1617}
1618
1619STATIC void
1620xfs_icsb_lock_cntr(
1621	xfs_icsb_cnts_t	*icsbp)
1622{
1623	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
1624		ndelay(1000);
1625	}
1626}
1627
1628STATIC void
1629xfs_icsb_unlock_cntr(
1630	xfs_icsb_cnts_t	*icsbp)
1631{
1632	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
1633}
1634
1635
1636STATIC void
1637xfs_icsb_lock_all_counters(
1638	xfs_mount_t	*mp)
1639{
1640	xfs_icsb_cnts_t *cntp;
1641	int		i;
1642
1643	for_each_online_cpu(i) {
1644		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1645		xfs_icsb_lock_cntr(cntp);
1646	}
1647}
1648
1649STATIC void
1650xfs_icsb_unlock_all_counters(
1651	xfs_mount_t	*mp)
1652{
1653	xfs_icsb_cnts_t *cntp;
1654	int		i;
1655
1656	for_each_online_cpu(i) {
1657		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1658		xfs_icsb_unlock_cntr(cntp);
1659	}
1660}
1661
1662STATIC void
1663xfs_icsb_count(
1664	xfs_mount_t	*mp,
1665	xfs_icsb_cnts_t	*cnt,
1666	int		flags)
1667{
1668	xfs_icsb_cnts_t *cntp;
1669	int		i;
1670
1671	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
1672
1673	if (!(flags & XFS_ICSB_LAZY_COUNT))
1674		xfs_icsb_lock_all_counters(mp);
1675
1676	for_each_online_cpu(i) {
1677		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1678		cnt->icsb_icount += cntp->icsb_icount;
1679		cnt->icsb_ifree += cntp->icsb_ifree;
1680		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
1681	}
1682
1683	if (!(flags & XFS_ICSB_LAZY_COUNT))
1684		xfs_icsb_unlock_all_counters(mp);
1685}
1686
1687STATIC int
1688xfs_icsb_counter_disabled(
1689	xfs_mount_t	*mp,
1690	xfs_sb_field_t	field)
1691{
1692	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1693	return test_bit(field, &mp->m_icsb_counters);
1694}
1695
1696STATIC void
1697xfs_icsb_disable_counter(
1698	xfs_mount_t	*mp,
1699	xfs_sb_field_t	field)
1700{
1701	xfs_icsb_cnts_t	cnt;
1702
1703	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1704
1705	/*
1706	 * If we are already disabled, then there is nothing to do
1707	 * here. We check before locking all the counters to avoid
1708	 * the expensive lock operation when being called in the
1709	 * slow path and the counter is already disabled. This is
1710	 * safe because the only time we set or clear this state is under
1711	 * the m_icsb_mutex.
1712	 */
1713	if (xfs_icsb_counter_disabled(mp, field))
1714		return;
1715
1716	xfs_icsb_lock_all_counters(mp);
1717	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
1718		/* drain back to superblock */
1719
1720		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
1721		switch(field) {
1722		case XFS_SBS_ICOUNT:
1723			mp->m_sb.sb_icount = cnt.icsb_icount;
1724			break;
1725		case XFS_SBS_IFREE:
1726			mp->m_sb.sb_ifree = cnt.icsb_ifree;
1727			break;
1728		case XFS_SBS_FDBLOCKS:
1729			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1730			break;
1731		default:
1732			BUG();
1733		}
1734	}
1735
1736	xfs_icsb_unlock_all_counters(mp);
1737}
1738
1739STATIC void
1740xfs_icsb_enable_counter(
1741	xfs_mount_t	*mp,
1742	xfs_sb_field_t	field,
1743	uint64_t	count,
1744	uint64_t	resid)
1745{
1746	xfs_icsb_cnts_t	*cntp;
1747	int		i;
1748
1749	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1750
1751	xfs_icsb_lock_all_counters(mp);
1752	for_each_online_cpu(i) {
1753		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
1754		switch (field) {
1755		case XFS_SBS_ICOUNT:
1756			cntp->icsb_icount = count + resid;
1757			break;
1758		case XFS_SBS_IFREE:
1759			cntp->icsb_ifree = count + resid;
1760			break;
1761		case XFS_SBS_FDBLOCKS:
1762			cntp->icsb_fdblocks = count + resid;
1763			break;
1764		default:
1765			BUG();
1766			break;
1767		}
1768		resid = 0;
1769	}
1770	clear_bit(field, &mp->m_icsb_counters);
1771	xfs_icsb_unlock_all_counters(mp);
1772}
1773
1774void
1775xfs_icsb_sync_counters_locked(
1776	xfs_mount_t	*mp,
1777	int		flags)
1778{
1779	xfs_icsb_cnts_t	cnt;
1780
1781	xfs_icsb_count(mp, &cnt, flags);
1782
1783	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
1784		mp->m_sb.sb_icount = cnt.icsb_icount;
1785	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
1786		mp->m_sb.sb_ifree = cnt.icsb_ifree;
1787	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
1788		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1789}
1790
1791/*
1792 * Accurate update of per-cpu counters to incore superblock
1793 */
1794void
1795xfs_icsb_sync_counters(
1796	xfs_mount_t	*mp,
1797	int		flags)
1798{
1799	spin_lock(&mp->m_sb_lock);
1800	xfs_icsb_sync_counters_locked(mp, flags);
1801	spin_unlock(&mp->m_sb_lock);
1802}
1803
1804/*
1805 * Balance and enable/disable counters as necessary.
1806 *
1807 * Thresholds for re-enabling counters are somewhat magic.  Inode counts are
1808 * chosen to be the same as a single on-disk allocation chunk per CPU, and
1809 * free blocks is kept far enough from zero that we aren't going to thrash when we
1810 * get near ENOSPC. We also need to supply a minimum we require per cpu to
1811 * prevent looping endlessly when xfs_alloc_space asks for more than will
1812 * be distributed to a single CPU but each CPU has enough blocks to be
1813 * reenabled.
1814 *
1815 * Note that we can be called when counters are already disabled.
1816 * xfs_icsb_disable_counter() optimises the counter locking in this case to
1817 * prevent locking every per-cpu counter needlessly.
1818 */
1819
1820#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
1821#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
1822		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
1823STATIC void
1824xfs_icsb_balance_counter_locked(
1825	xfs_mount_t	*mp,
1826	xfs_sb_field_t  field,
1827	int		min_per_cpu)
1828{
1829	uint64_t	count, resid;
1830	int		weight = num_online_cpus();
1831	uint64_t	min = (uint64_t)min_per_cpu;
1832
1833	/* disable counter and sync counter */
1834	xfs_icsb_disable_counter(mp, field);
1835
1836	/* update counters  - first CPU gets residual*/
1837	switch (field) {
1838	case XFS_SBS_ICOUNT:
1839		count = mp->m_sb.sb_icount;
1840		resid = do_div(count, weight);
1841		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1842			return;
1843		break;
1844	case XFS_SBS_IFREE:
1845		count = mp->m_sb.sb_ifree;
1846		resid = do_div(count, weight);
1847		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1848			return;
1849		break;
1850	case XFS_SBS_FDBLOCKS:
1851		count = mp->m_sb.sb_fdblocks;
1852		resid = do_div(count, weight);
1853		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
1854			return;
1855		break;
1856	default:
1857		BUG();
1858		count = resid = 0;	/* quiet, gcc */
1859		break;
1860	}
1861
1862	xfs_icsb_enable_counter(mp, field, count, resid);
1863}
1864
1865STATIC void
1866xfs_icsb_balance_counter(
1867	xfs_mount_t	*mp,
1868	xfs_sb_field_t  fields,
1869	int		min_per_cpu)
1870{
1871	spin_lock(&mp->m_sb_lock);
1872	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
1873	spin_unlock(&mp->m_sb_lock);
1874}
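/*
 * Editor's example of the distribution above: with sb_icount = 1003
 * and 4 online CPUs, do_div() leaves count = 250 with resid = 3, so
 * CPU 0 receives 253 and every other CPU 250. Had the per-CPU share
 * fallen below max(min_per_cpu, XFS_ICSB_INO_CNTR_REENABLE), the
 * early return would have left the counter disabled instead.
 */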
1875
1876int
1877xfs_icsb_modify_counters(
1878	xfs_mount_t	*mp,
1879	xfs_sb_field_t	field,
1880	int64_t		delta,
1881	int		rsvd)
1882{
1883	xfs_icsb_cnts_t	*icsbp;
1884	long long	lcounter;	/* long counter for 64 bit fields */
1885	int		ret = 0;
1886
1887	might_sleep();
1888again:
1889	preempt_disable();
1890	icsbp = this_cpu_ptr(mp->m_sb_cnts);
1891
1892	/*
1893	 * if the counter is disabled, go to slow path
1894	 */
1895	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
1896		goto slow_path;
1897	xfs_icsb_lock_cntr(icsbp);
1898	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
1899		xfs_icsb_unlock_cntr(icsbp);
1900		goto slow_path;
1901	}
1902
1903	switch (field) {
1904	case XFS_SBS_ICOUNT:
1905		lcounter = icsbp->icsb_icount;
1906		lcounter += delta;
1907		if (unlikely(lcounter < 0))
1908			goto balance_counter;
1909		icsbp->icsb_icount = lcounter;
1910		break;
1911
1912	case XFS_SBS_IFREE:
1913		lcounter = icsbp->icsb_ifree;
1914		lcounter += delta;
1915		if (unlikely(lcounter < 0))
1916			goto balance_counter;
1917		icsbp->icsb_ifree = lcounter;
1918		break;
1919
1920	case XFS_SBS_FDBLOCKS:
1921		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
1922
1923		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1924		lcounter += delta;
1925		if (unlikely(lcounter < 0))
1926			goto balance_counter;
1927		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1928		break;
1929	default:
1930		BUG();
1931		break;
1932	}
1933	xfs_icsb_unlock_cntr(icsbp);
1934	preempt_enable();
1935	return 0;
1936
1937slow_path:
1938	preempt_enable();
1939
1940	/*
1941	 * serialise with a mutex so we don't burn lots of cpu on
1942	 * the superblock lock. We still need to hold the superblock
1943	 * lock, however, when we modify the global structures.
1944	 */
1945	xfs_icsb_lock(mp);
1946
1947	/*
1948	 * Now running atomically.
1949	 *
1950	 * If the counter is enabled, someone has beaten us to rebalancing.
1951	 * Drop the lock and try again in the fast path....
1952	 */
1953	if (!(xfs_icsb_counter_disabled(mp, field))) {
1954		xfs_icsb_unlock(mp);
1955		goto again;
1956	}
1957
1958	/*
1959	 * The counter is currently disabled. Because we are
1960	 * running atomically here, we know a rebalance cannot
1961	 * be in progress. Hence we can go straight to operating
1962	 * on the global superblock. We do not call xfs_mod_incore_sb()
1963	 * here even though we need to get the m_sb_lock. Doing so
1964	 * will cause us to re-enter this function and deadlock.
1965	 * Hence we get the m_sb_lock ourselves and then call
1966	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
1967	 * directly on the global counters.
1968	 */
1969	spin_lock(&mp->m_sb_lock);
1970	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1971	spin_unlock(&mp->m_sb_lock);
1972
1973	/*
1974	 * Now that we've modified the global superblock, we
1975	 * may be able to re-enable the distributed counters
1976	 * (e.g. lots of space just got freed). After that
1977	 * we are done.
1978	 */
1979	if (ret != ENOSPC)
1980		xfs_icsb_balance_counter(mp, field, 0);
1981	xfs_icsb_unlock(mp);
1982	return ret;
1983
1984balance_counter:
1985	xfs_icsb_unlock_cntr(icsbp);
1986	preempt_enable();
1987
1988	/*
1989	 * We may have multiple threads here if multiple per-cpu
1990	 * counters run dry at the same time. This will mean we can
1991	 * do more balances than strictly necessary but it is not
1992	 * the common slowpath case.
1993	 */
1994	xfs_icsb_lock(mp);
1995
1996	/*
1997	 * Running atomically.
1998	 *
1999	 * This will leave the counter in the correct state for future
2000	 * accesses. After the rebalance, we simply try again and our retry
2001	 * will either succeed through the fast path or slow path without
2002	 * another balance operation being required.
2003	 */
2004	xfs_icsb_balance_counter(mp, field, delta);
2005	xfs_icsb_unlock(mp);
2006	goto again;
2007}
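/*
 * Editorial usage sketch, not part of the kernel source: accounting a
 * two-block allocation against the free-block counter through this
 * v3.15-era interface.  A negative delta consumes blocks; a positive
 * ENOSPC return means even the slow path found no space.  The helper
 * name is hypothetical and assumes kernel context.
 */
STATIC int
example_consume_blocks(
	xfs_mount_t	*mp)
{
	/* rsvd == 0: do not dip into the reserved block pool */
	return xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -2, 0);
}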
2008
2009#endif
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_sb.h"
  14#include "xfs_mount.h"
  15#include "xfs_inode.h"
  16#include "xfs_dir2.h"
  17#include "xfs_ialloc.h"
  18#include "xfs_alloc.h"
  19#include "xfs_rtalloc.h"
  20#include "xfs_bmap.h"
  21#include "xfs_trans.h"
  22#include "xfs_trans_priv.h"
  23#include "xfs_log.h"
  24#include "xfs_error.h"
  25#include "xfs_quota.h"
  26#include "xfs_fsops.h"
  27#include "xfs_icache.h"
  28#include "xfs_sysfs.h"
  29#include "xfs_rmap_btree.h"
  30#include "xfs_refcount_btree.h"
  31#include "xfs_reflink.h"
  32#include "xfs_extent_busy.h"
  33#include "xfs_health.h"
  34#include "xfs_trace.h"
  35
  36static DEFINE_MUTEX(xfs_uuid_table_mutex);
  37static int xfs_uuid_table_size;
  38static uuid_t *xfs_uuid_table;
  39
  40void
  41xfs_uuid_table_free(void)
  42{
  43	if (xfs_uuid_table_size == 0)
  44		return;
  45	kmem_free(xfs_uuid_table);
  46	xfs_uuid_table = NULL;
  47	xfs_uuid_table_size = 0;
  48}
  49
  50/*
  51 * See if the UUID is unique among mounted XFS filesystems.
  52 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
  53 */
  54STATIC int
  55xfs_uuid_mount(
  56	struct xfs_mount	*mp)
  57{
  58	uuid_t			*uuid = &mp->m_sb.sb_uuid;
  59	int			hole, i;
  60
  61	/* Publish UUID in struct super_block */
  62	uuid_copy(&mp->m_super->s_uuid, uuid);
  63
  64	if (mp->m_flags & XFS_MOUNT_NOUUID)
  65		return 0;
  66
  67	if (uuid_is_null(uuid)) {
  68		xfs_warn(mp, "Filesystem has null UUID - can't mount");
  69		return -EINVAL;
  70	}
  71
  72	mutex_lock(&xfs_uuid_table_mutex);
  73	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
  74		if (uuid_is_null(&xfs_uuid_table[i])) {
  75			hole = i;
  76			continue;
  77		}
  78		if (uuid_equal(uuid, &xfs_uuid_table[i]))
  79			goto out_duplicate;
  80	}
  81
  82	if (hole < 0) {
  83		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
  84			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
  85			0);
  86		hole = xfs_uuid_table_size++;
  87	}
  88	xfs_uuid_table[hole] = *uuid;
  89	mutex_unlock(&xfs_uuid_table_mutex);
  90
  91	return 0;
  92
  93 out_duplicate:
  94	mutex_unlock(&xfs_uuid_table_mutex);
  95	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
  96	return -EINVAL;
  97}
  98
  99STATIC void
 100xfs_uuid_unmount(
 101	struct xfs_mount	*mp)
 102{
 103	uuid_t			*uuid = &mp->m_sb.sb_uuid;
 104	int			i;
 105
 106	if (mp->m_flags & XFS_MOUNT_NOUUID)
 107		return;
 108
 109	mutex_lock(&xfs_uuid_table_mutex);
 110	for (i = 0; i < xfs_uuid_table_size; i++) {
 111		if (uuid_is_null(&xfs_uuid_table[i]))
 112			continue;
 113		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
 114			continue;
 115		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
 116		break;
 117	}
 118	ASSERT(i < xfs_uuid_table_size);
 119	mutex_unlock(&xfs_uuid_table_mutex);
 120}
 121
 122
 123STATIC void
 124__xfs_free_perag(
 125	struct rcu_head	*head)
 126{
 127	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
 128
 129	ASSERT(atomic_read(&pag->pag_ref) == 0);
 130	kmem_free(pag);
 131}
 132
 133/*
 134 * Free up the per-ag resources associated with the mount structure.
 135 */
 136STATIC void
 137xfs_free_perag(
 138	xfs_mount_t	*mp)
 139{
 140	xfs_agnumber_t	agno;
 141	struct xfs_perag *pag;
 142
 143	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 144		spin_lock(&mp->m_perag_lock);
 145		pag = radix_tree_delete(&mp->m_perag_tree, agno);
 146		spin_unlock(&mp->m_perag_lock);
 147		ASSERT(pag);
 148		ASSERT(atomic_read(&pag->pag_ref) == 0);
 149		xfs_iunlink_destroy(pag);
 150		xfs_buf_hash_destroy(pag);
 151		call_rcu(&pag->rcu_head, __xfs_free_perag);
 152	}
 153}
 154
 155/*
 156 * Check size of device based on the (data/realtime) block count.
 157 * Note: this check is used by the growfs code as well as mount.
 158 */
 159int
 160xfs_sb_validate_fsb_count(
 161	xfs_sb_t	*sbp,
 162	uint64_t	nblocks)
 163{
 164	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
 165	ASSERT(sbp->sb_blocklog >= BBSHIFT);
 166
 167	/* Limited by ULONG_MAX of page cache index */
 168	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
 169		return -EFBIG;
 170	return 0;
 171}
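/*
 * Editorial example, not part of the kernel source: the page cache
 * indexes a file in PAGE_SIZE units using an unsigned long, so the check
 * above rejects block counts whose page index would overflow it.
 * Runnable stand-alone illustration, assuming a 32-bit kernel
 * (ULONG_MAX == 0xFFFFFFFF) with 4 KiB pages and 4 KiB blocks:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int page_shift = 12, blocklog = 12;
	uint64_t index_max = 0xFFFFFFFFULL;	/* 32-bit ULONG_MAX */
	uint64_t nblocks = index_max + 1;	/* one block too many */

	if (nblocks >> (page_shift - blocklog) > index_max)
		printf("-EFBIG: %llu blocks overflow the page cache index\n",
		       (unsigned long long)nblocks);
	return 0;
}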
 172
 173int
 174xfs_initialize_perag(
 175	xfs_mount_t	*mp,
 176	xfs_agnumber_t	agcount,
 177	xfs_agnumber_t	*maxagi)
 178{
 179	xfs_agnumber_t	index;
 180	xfs_agnumber_t	first_initialised = NULLAGNUMBER;
 181	xfs_perag_t	*pag;
 182	int		error = -ENOMEM;
 183
 184	/*
 185	 * Walk the current per-ag tree so we don't try to initialise AGs
 186	 * that already exist (growfs case). Allocate and insert all the
 187	 * AGs we don't find ready for initialisation.
 188	 */
 189	for (index = 0; index < agcount; index++) {
 190		pag = xfs_perag_get(mp, index);
 191		if (pag) {
 192			xfs_perag_put(pag);
 193			continue;
 194		}
 195
 196		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
 197		if (!pag)
 198			goto out_unwind_new_pags;
 199		pag->pag_agno = index;
 200		pag->pag_mount = mp;
 201		spin_lock_init(&pag->pag_ici_lock);
 202		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 203		if (xfs_buf_hash_init(pag))
 204			goto out_free_pag;
 205		init_waitqueue_head(&pag->pagb_wait);
 206		spin_lock_init(&pag->pagb_lock);
 207		pag->pagb_count = 0;
 208		pag->pagb_tree = RB_ROOT;
 209
 210		if (radix_tree_preload(GFP_NOFS))
 211			goto out_hash_destroy;
 212
 213		spin_lock(&mp->m_perag_lock);
 214		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
 215			WARN_ON_ONCE(1);
 216			spin_unlock(&mp->m_perag_lock);
 217			radix_tree_preload_end();
 218			error = -EEXIST;
 219			goto out_hash_destroy;
 220		}
 221		spin_unlock(&mp->m_perag_lock);
 222		radix_tree_preload_end();
 223		/* first new pag is fully initialized */
 224		if (first_initialised == NULLAGNUMBER)
 225			first_initialised = index;
 226		error = xfs_iunlink_init(pag);
 227		if (error)
 228			goto out_hash_destroy;
 229		spin_lock_init(&pag->pag_state_lock);
 230	}
 231
 232	index = xfs_set_inode_alloc(mp, agcount);
 233
 234	if (maxagi)
 235		*maxagi = index;
 236
 237	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
 238	return 0;
 239
 240out_hash_destroy:
 241	xfs_buf_hash_destroy(pag);
 242out_free_pag:
 243	kmem_free(pag);
 244out_unwind_new_pags:
 245	/* unwind any prior newly initialized pags */
 246	for (index = first_initialised; index < agcount; index++) {
 247		pag = radix_tree_delete(&mp->m_perag_tree, index);
 248		if (!pag)
 249			break;
 250		xfs_buf_hash_destroy(pag);
 251		xfs_iunlink_destroy(pag);
 252		kmem_free(pag);
 253	}
 254	return error;
 255}
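/*
 * Editorial usage sketch, not part of the kernel source: the per-AG
 * structures installed above are looked up with xfs_perag_get(), which
 * takes a reference, and every lookup must be balanced with
 * xfs_perag_put().  The helper name is hypothetical; assumes kernel
 * context.
 */
static void
example_walk_ags(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_get(mp, agno);
		if (!pag)
			continue;
		/* ... inspect pag->pag_agno, pag->pagb_count, ... */
		xfs_perag_put(pag);
	}
}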
 256
 257/*
 258 * xfs_readsb
 259 *
 260 * Does the initial read of the superblock.
 261 */
 262int
 263xfs_readsb(
 264	struct xfs_mount *mp,
 265	int		flags)
 266{
 267	unsigned int	sector_size;
 268	struct xfs_buf	*bp;
 269	struct xfs_sb	*sbp = &mp->m_sb;
 270	int		error;
 271	int		loud = !(flags & XFS_MFSI_QUIET);
 272	const struct xfs_buf_ops *buf_ops;
 273
 274	ASSERT(mp->m_sb_bp == NULL);
 275	ASSERT(mp->m_ddev_targp != NULL);
 276
 277	/*
 278	 * For the initial read, we must guess at the sector
 279	 * size based on the block device.  It's enough to
 280	 * get the sb_sectsize out of the superblock and
 281	 * then reread with the proper length.
 282	 * We don't verify it yet, because it may not be complete.
 283	 */
 284	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
 285	buf_ops = NULL;
 286
 287	/*
 288	 * Allocate a (locked) buffer to hold the superblock. This will be kept
 289	 * around at all times to optimize access to the superblock. Therefore,
 290	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
 291	 * elevated.
 292	 */
 293reread:
 294	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
 295				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
 296				      buf_ops);
 297	if (error) {
 298		if (loud)
 299			xfs_warn(mp, "SB validate failed with error %d.", error);
 300		/* bad CRC means corrupted metadata */
 301		if (error == -EFSBADCRC)
 302			error = -EFSCORRUPTED;
 303		return error;
 304	}
 305
 306	/*
 307	 * Initialize the mount structure from the superblock.
 308	 */
 309	xfs_sb_from_disk(sbp, bp->b_addr);
 310
 311	/*
 312	 * If we haven't validated the superblock, do so now before we try
 313	 * to check the sector size and reread the superblock appropriately.
 314	 */
 315	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
 316		if (loud)
 317			xfs_warn(mp, "Invalid superblock magic number");
 318		error = -EINVAL;
 319		goto release_buf;
 320	}
 321
 322	/*
 323	 * We must be able to do sector-sized and sector-aligned IO.
 324	 */
 325	if (sector_size > sbp->sb_sectsize) {
 326		if (loud)
 327			xfs_warn(mp, "device supports %u byte sectors (not %u)",
 328				sector_size, sbp->sb_sectsize);
 329		error = -ENOSYS;
 330		goto release_buf;
 331	}
 332
 333	if (buf_ops == NULL) {
 334		/*
 335		 * Re-read the superblock so the buffer is correctly sized,
 336		 * and properly verified.
 337		 */
 338		xfs_buf_relse(bp);
 339		sector_size = sbp->sb_sectsize;
 340		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
 341		goto reread;
 342	}
 343
 344	xfs_reinit_percpu_counters(mp);
 345
 346	/* no need to be quiet anymore, so reset the buf ops */
 347	bp->b_ops = &xfs_sb_buf_ops;
 348
 349	mp->m_sb_bp = bp;
 350	xfs_buf_unlock(bp);
 351	return 0;
 352
 353release_buf:
 354	xfs_buf_relse(bp);
 355	return error;
 356}
 357
 358/*
 359 * If the sunit/swidth change would move the precomputed root inode value, we
 360 * must reject the ondisk change because repair will stumble over that.
 361 * However, we allow the mount to proceed because we never rejected this
 362 * combination before.  Returns true to update the sb, false otherwise.
 363 */
 364static inline int
 365xfs_check_new_dalign(
 366	struct xfs_mount	*mp,
 367	int			new_dalign,
 368	bool			*update_sb)
 369{
 370	struct xfs_sb		*sbp = &mp->m_sb;
 371	xfs_ino_t		calc_ino;
 372
 373	calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
 374	trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);
 375
 376	if (sbp->sb_rootino == calc_ino) {
 377		*update_sb = true;
 378		return 0;
 379	}
 380
 381	xfs_warn(mp,
 382"Cannot change stripe alignment; would require moving root inode.");
 383
 384	/*
 385	 * XXX: Next time we add a new incompat feature, this should start
 386	 * returning -EINVAL to fail the mount.  Until then, spit out a warning
 387	 * that we're ignoring the administrator's instructions.
 388	 */
 389	xfs_warn(mp, "Skipping superblock stripe alignment update.");
 390	*update_sb = false;
 391	return 0;
 392}
 393
 394/*
 395 * If we were provided with new sunit/swidth values as mount options, make sure
 396 * that they pass basic alignment and superblock feature checks, and convert
 397 * them into the same units (FSB) that everything else expects.  This step
 398 * /must/ be done before computing the inode geometry.
 399 */
 400STATIC int
 401xfs_validate_new_dalign(
 402	struct xfs_mount	*mp)
 403{
 404	if (mp->m_dalign == 0)
 405		return 0;
 406
 407	/*
 408	 * If stripe unit and stripe width are not multiples
 409	 * of the fs blocksize turn off alignment.
 410	 */
 411	if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
 412	    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
 413		xfs_warn(mp,
 414	"alignment check failed: sunit/swidth vs. blocksize(%d)",
 415			mp->m_sb.sb_blocksize);
 416		return -EINVAL;
 417	} else {
 418		/*
 419		 * Convert the stripe unit and width to FSBs.
 420		 */
 421		mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
 422		if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
 423			xfs_warn(mp,
 424		"alignment check failed: sunit/swidth vs. agsize(%d)",
 425				 mp->m_sb.sb_agblocks);
 426			return -EINVAL;
 427		} else if (mp->m_dalign) {
 428			mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
 429		} else {
 430			xfs_warn(mp,
 431		"alignment check failed: sunit(%d) less than bsize(%d)",
 432				 mp->m_dalign, mp->m_sb.sb_blocksize);
 433			return -EINVAL;
 434		}
 435	}
 436
 437	if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
 438		xfs_warn(mp,
 439"cannot change alignment: superblock does not support data alignment");
 440		return -EINVAL;
 441	}
 442
 443	return 0;
 444}
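/*
 * Editorial example, not part of the kernel source: sunit/swidth mount
 * options arrive in 512-byte basic blocks (BBs) and are converted to
 * filesystem blocks (FSBs) above.  The shift below mirrors what
 * XFS_BB_TO_FSBT() computes for this hypothetical geometry (4096-byte
 * blocks, so a 64-BB stripe unit is 8 FSBs):
 */
#include <stdio.h>

int main(void)
{
	int sunit_bb = 64;	/* 64 * 512 bytes = 32 KiB stripe unit */
	int blocklog = 12;	/* 4096-byte filesystem blocks */
	int bbshift = 9;	/* 512-byte basic blocks */

	printf("sunit: %d BBs -> %d FSBs\n",
	       sunit_bb, sunit_bb >> (blocklog - bbshift));
	return 0;
}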
 445
 446/* Update alignment values based on mount options and sb values. */
 447STATIC int
 448xfs_update_alignment(
 449	struct xfs_mount	*mp)
 450{
 451	struct xfs_sb		*sbp = &mp->m_sb;
 452
 453	if (mp->m_dalign) {
 454		bool		update_sb;
 455		int		error;
 456
 457		if (sbp->sb_unit == mp->m_dalign &&
 458		    sbp->sb_width == mp->m_swidth)
 459			return 0;
 460
 461		error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
 462		if (error || !update_sb)
 463			return error;
 464
 465		sbp->sb_unit = mp->m_dalign;
 466		sbp->sb_width = mp->m_swidth;
 467		mp->m_update_sb = true;
 468	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
 469		    xfs_sb_version_hasdalign(&mp->m_sb)) {
 470		mp->m_dalign = sbp->sb_unit;
 471		mp->m_swidth = sbp->sb_width;
 472	}
 473
 474	return 0;
 475}
 476
 477/*
 478 * precalculate the low space thresholds for dynamic speculative preallocation.
 479 */
 480void
 481xfs_set_low_space_thresholds(
 482	struct xfs_mount	*mp)
 483{
 484	int i;
 485
 486	for (i = 0; i < XFS_LOWSP_MAX; i++) {
 487		uint64_t space = mp->m_sb.sb_dblocks;
 488
 489		do_div(space, 100);
 490		mp->m_low_space[i] = space * (i + 1);
 491	}
 492}
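/*
 * Editorial example, not part of the kernel source: the loop above
 * stores 1%..N% of the data block count, where N is XFS_LOWSP_MAX
 * (assumed here to be 5).  Runnable stand-alone arithmetic for a
 * hypothetical 1,000,000-block filesystem:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dblocks = 1000000;
	int i;

	for (i = 0; i < 5; i++)		/* assumed XFS_LOWSP_MAX */
		printf("m_low_space[%d] = %llu\n", i,
		       (unsigned long long)(dblocks / 100 * (i + 1)));
	return 0;			/* prints 10000..50000 */
}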
 493
 494/*
 495 * Check that the data (and log if separate) is an ok size.
 496 */
 497STATIC int
 498xfs_check_sizes(
 499	struct xfs_mount *mp)
 500{
 501	struct xfs_buf	*bp;
 502	xfs_daddr_t	d;
 503	int		error;
 504
 505	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
 506	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
 507		xfs_warn(mp, "filesystem size mismatch detected");
 508		return -EFBIG;
 509	}
 510	error = xfs_buf_read_uncached(mp->m_ddev_targp,
 511					d - XFS_FSS_TO_BB(mp, 1),
 512					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
 513	if (error) {
 514		xfs_warn(mp, "last sector read failed");
 515		return error;
 516	}
 517	xfs_buf_relse(bp);
 518
 519	if (mp->m_logdev_targp == mp->m_ddev_targp)
 520		return 0;
 521
 522	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
 523	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
 524		xfs_warn(mp, "log size mismatch detected");
 525		return -EFBIG;
 526	}
 527	error = xfs_buf_read_uncached(mp->m_logdev_targp,
 528					d - XFS_FSB_TO_BB(mp, 1),
 529					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
 530	if (error) {
 531		xfs_warn(mp, "log device read failed");
 532		return error;
 533	}
 534	xfs_buf_relse(bp);
 535	return 0;
 536}
 537
 538/*
 539 * Clear the quotaflags in memory and in the superblock.
 540 */
 541int
 542xfs_mount_reset_sbqflags(
 543	struct xfs_mount	*mp)
 544{
 545	mp->m_qflags = 0;
 546
 547	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
 548	if (mp->m_sb.sb_qflags == 0)
 549		return 0;
 550	spin_lock(&mp->m_sb_lock);
 551	mp->m_sb.sb_qflags = 0;
 552	spin_unlock(&mp->m_sb_lock);
 553
 554	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
 555		return 0;
 556
 557	return xfs_sync_sb(mp, false);
 558}
 559
 560uint64_t
 561xfs_default_resblks(xfs_mount_t *mp)
 562{
 563	uint64_t resblks;
 564
 565	/*
 566	 * We default to 5% or 8192 fsbs of space reserved, whichever is
 567	 * smaller.  This is intended to cover concurrent allocation
 568	 * transactions when we initially hit enospc. These each require a 4
 569	 * block reservation. Hence by default we cover roughly 2000 concurrent
 570	 * allocation reservations.
 571	 */
 572	resblks = mp->m_sb.sb_dblocks;
 573	do_div(resblks, 20);
 574	resblks = min_t(uint64_t, resblks, 8192);
 575	return resblks;
 576}
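/*
 * Editorial example, not part of the kernel source: the default reserve
 * is min(5% of data blocks, 8192).  Runnable stand-alone arithmetic for
 * a hypothetical 256 GiB filesystem with 4 KiB blocks:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dblocks = 67108864;		/* 256 GiB / 4 KiB */
	uint64_t resblks = dblocks / 20;	/* 5% = 3355443 */

	if (resblks > 8192)
		resblks = 8192;			/* the cap wins here */
	printf("default reserve pool: %llu blocks\n",
	       (unsigned long long)resblks);
	return 0;
}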
 577
 578/* Ensure the summary counts are correct. */
 579STATIC int
 580xfs_check_summary_counts(
 581	struct xfs_mount	*mp)
 582{
 583	/*
 584	 * The AG0 superblock verifier rejects in-progress filesystems,
 585	 * so we should never see the flag set this far into mounting.
 586	 */
 587	if (mp->m_sb.sb_inprogress) {
 588		xfs_err(mp, "sb_inprogress set after log recovery??");
 589		WARN_ON(1);
 590		return -EFSCORRUPTED;
 591	}
 592
 593	/*
 594	 * Now the log is mounted, we know if it was an unclean shutdown or
 595	 * not. If it was, the first phase of recovery has completed and we
 596	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
 597	 * but they are recovered transactionally in the second recovery phase
 598	 * later.
 599	 *
 600	 * If the log was clean when we mounted, we can check the summary
 601	 * counters.  If any of them are obviously incorrect, we can recompute
 602	 * them from the AGF headers in the next step.
 603	 */
 604	if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
 605	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
 606	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
 607	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
 608		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
 609
 610	/*
 611	 * We can safely re-initialise incore superblock counters from the
 612	 * per-ag data. These may not be correct if the filesystem was not
 613	 * cleanly unmounted, so we waited for recovery to finish before doing
 614	 * this.
 615	 *
 616	 * If the filesystem was cleanly unmounted or the previous check did
 617	 * not flag anything weird, then we can trust the values in the
 618	 * superblock to be correct and we don't need to do anything here.
 619	 * Otherwise, recalculate the summary counters.
 620	 */
 621	if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
 622	     XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
 623	    !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
 624		return 0;
 625
 626	return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
 627}
 628
 629/*
 630 * This function does the following on an initial mount of a file system:
 631 *	- reads the superblock from disk and inits the mount struct
 632 *	- if we're a 32-bit kernel, do a size check on the superblock
 633 *		so we don't mount terabyte filesystems
 634 *	- init mount struct realtime fields
 635 *	- allocate inode hash table for fs
 636 *	- init directory manager
 637 *	- perform recovery and init the log manager
 638 */
 639int
 640xfs_mountfs(
 641	struct xfs_mount	*mp)
 642{
 643	struct xfs_sb		*sbp = &(mp->m_sb);
 644	struct xfs_inode	*rip;
 645	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
 646	uint64_t		resblks;
 647	uint			quotamount = 0;
 648	uint			quotaflags = 0;
 649	int			error = 0;
 650
 651	xfs_sb_mount_common(mp, sbp);
 652
 653	/*
 654	 * Check for a mismatched features2 values.  Older kernels read & wrote
 655	 * into the wrong sb offset for sb_features2 on some platforms due to
 656	 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
 657	 * which made older superblock reading/writing routines swap it as a
 658	 * 64-bit value.
 659	 *
 660	 * For backwards compatibility, we make both slots equal.
 661	 *
 662	 * If we detect a mismatched field, we OR the set bits into the existing
 663	 * features2 field in case it has already been modified; we don't want
 664	 * to lose any features.  We then update the bad location with the ORed
 665	 * value so that older kernels will see any features2 flags. The
 666	 * superblock writeback code ensures the new sb_features2 is copied to
 667	 * sb_bad_features2 before it is logged or written to disk.
 668	 */
 669	if (xfs_sb_has_mismatched_features2(sbp)) {
 670		xfs_warn(mp, "correcting sb_features alignment problem");
 671		sbp->sb_features2 |= sbp->sb_bad_features2;
 672		mp->m_update_sb = true;
 673
 674		/*
 675		 * Re-check for ATTR2 in case it was found in bad_features2
 676		 * slot.
 677		 */
 678		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
 679		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
 680			mp->m_flags |= XFS_MOUNT_ATTR2;
 681	}
 682
 683	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
 684	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
 685		xfs_sb_version_removeattr2(&mp->m_sb);
 686		mp->m_update_sb = true;
 687
 688		/* update sb_versionnum for the clearing of the morebits */
 689		if (!sbp->sb_features2)
 690			mp->m_update_sb = true;
 691	}
 692
 693	/* always use v2 inodes by default now */
 694	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
 695		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
 696		mp->m_update_sb = true;
 697	}
 698
 699	/*
 700	 * If we were given new sunit/swidth options, do some basic validation
 701	 * checks and convert the incore dalign and swidth values to the
 702	 * same units (FSB) that everything else uses.  This /must/ happen
 703	 * before computing the inode geometry.
 704	 */
 705	error = xfs_validate_new_dalign(mp);
 706	if (error)
 707		goto out;
 708
 709	xfs_alloc_compute_maxlevels(mp);
 710	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
 711	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
 712	xfs_ialloc_setup_geometry(mp);
 713	xfs_rmapbt_compute_maxlevels(mp);
 714	xfs_refcountbt_compute_maxlevels(mp);
 715
 716	/*
 717	 * Check if sb_agblocks is aligned at stripe boundary.  If sb_agblocks
 718	 * is NOT aligned turn off m_dalign since allocator alignment is within
 719	 * an ag, therefore ag has to be aligned at stripe boundary.  Note that
 720	 * we must compute the free space and rmap btree geometry before doing
 721	 * this.
 722	 */
 723	error = xfs_update_alignment(mp);
 724	if (error)
 725		goto out;
 726
 727	/* enable fail_at_unmount as default */
 728	mp->m_fail_unmount = true;
 729
 730	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
 731			       NULL, mp->m_super->s_id);
 732	if (error)
 733		goto out;
 734
 735	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
 736			       &mp->m_kobj, "stats");
 737	if (error)
 738		goto out_remove_sysfs;
 739
 740	error = xfs_error_sysfs_init(mp);
 741	if (error)
 742		goto out_del_stats;
 743
 744	error = xfs_errortag_init(mp);
 745	if (error)
 746		goto out_remove_error_sysfs;
 747
 748	error = xfs_uuid_mount(mp);
 749	if (error)
 750		goto out_remove_errortag;
 751
 752	/*
 753	 * Update the preferred write size based on the information from the
 754	 * on-disk superblock.
 755	 */
 756	mp->m_allocsize_log =
 757		max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
 758	mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
 759
 760	/* set the low space thresholds for dynamic preallocation */
 761	xfs_set_low_space_thresholds(mp);
 762
 763	/*
 764	 * If enabled, sparse inode chunk alignment is expected to match the
 765	 * cluster size. Full inode chunk alignment must match the chunk size,
 766	 * but that is checked on sb read verification...
 767	 */
 768	if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
 769	    mp->m_sb.sb_spino_align !=
 770			XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
 771		xfs_warn(mp,
 772	"Sparse inode block alignment (%u) must match cluster size (%llu).",
 773			 mp->m_sb.sb_spino_align,
 774			 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
 775		error = -EINVAL;
 776		goto out_remove_uuid;
 777	}
 778
 779	/*
 780	 * Check that the data (and log if separate) is an ok size.
 781	 */
 782	error = xfs_check_sizes(mp);
 783	if (error)
 784		goto out_remove_uuid;
 785
 786	/*
 787	 * Initialize realtime fields in the mount structure
 788	 */
 789	error = xfs_rtmount_init(mp);
 790	if (error) {
 791		xfs_warn(mp, "RT mount failed");
 792		goto out_remove_uuid;
 793	}
 794
 795	/*
 796	 *  Copies the low order bits of the timestamp and the randomly
 797	 *  set "sequence" number out of a UUID.
 798	 */
 799	mp->m_fixedfsid[0] =
 800		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
 801		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
 802	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
 803
 804	error = xfs_da_mount(mp);
 805	if (error) {
 806		xfs_warn(mp, "Failed dir/attr init: %d", error);
 807		goto out_remove_uuid;
 808	}
 809
 810	/*
 811	 * Initialize the precomputed transaction reservations values.
 812	 */
 813	xfs_trans_init(mp);
 814
 815	/*
 816	 * Allocate and initialize the per-ag data.
 817	 */
 818	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
 819	if (error) {
 820		xfs_warn(mp, "Failed per-ag init: %d", error);
 821		goto out_free_dir;
 822	}
 823
 824	if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
 825		xfs_warn(mp, "no log defined");
 826		error = -EFSCORRUPTED;
 827		goto out_free_perag;
 828	}
 829
 830	/*
 831	 * Log's mount-time initialization. The first part of recovery can place
 832	 * some items on the AIL, to be handled when recovery is finished or
 833	 * cancelled.
 834	 */
 835	error = xfs_log_mount(mp, mp->m_logdev_targp,
 836			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
 837			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
 838	if (error) {
 839		xfs_warn(mp, "log mount failed");
 840		goto out_fail_wait;
 841	}
 842
 843	/* Make sure the summary counts are ok. */
 844	error = xfs_check_summary_counts(mp);
 845	if (error)
 846		goto out_log_dealloc;
 847
 848	/*
 849	 * Get and sanity-check the root inode.
 850	 * Save the pointer to it in the mount structure.
 851	 */
 852	error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
 853			 XFS_ILOCK_EXCL, &rip);
 854	if (error) {
 855		xfs_warn(mp,
 856			"Failed to read root inode 0x%llx, error %d",
 857			sbp->sb_rootino, -error);
 858		goto out_log_dealloc;
 859	}
 860
 861	ASSERT(rip != NULL);
 862
 863	if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
 864		xfs_warn(mp, "corrupted root inode %llu: not a directory",
 865			(unsigned long long)rip->i_ino);
 866		xfs_iunlock(rip, XFS_ILOCK_EXCL);
 867		error = -EFSCORRUPTED;
 868		goto out_rele_rip;
 869	}
 870	mp->m_rootip = rip;	/* save it */
 871
 872	xfs_iunlock(rip, XFS_ILOCK_EXCL);
 873
 874	/*
 875	 * Initialize realtime inode pointers in the mount structure
 876	 */
 877	error = xfs_rtmount_inodes(mp);
 878	if (error) {
 879		/*
 880		 * Free up the root inode.
 881		 */
 882		xfs_warn(mp, "failed to read RT inodes");
 883		goto out_rele_rip;
 884	}
 885
 886	/*
 887	 * If this is a read-only mount defer the superblock updates until
 888	 * the next remount into writeable mode.  Otherwise we would never
 889	 * perform the update e.g. for the root filesystem.
 890	 */
 891	if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
 892		error = xfs_sync_sb(mp, false);
 893		if (error) {
 894			xfs_warn(mp, "failed to write sb changes");
 895			goto out_rtunmount;
 896		}
 897	}
 898
 899	/*
 900	 * Initialise the XFS quota management subsystem for this mount
 901	 */
 902	if (XFS_IS_QUOTA_RUNNING(mp)) {
 903		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
 904		if (error)
 905			goto out_rtunmount;
 906	} else {
 907		ASSERT(!XFS_IS_QUOTA_ON(mp));
 908
 909		/*
 910		 * If a file system had quotas running earlier, but decided to
 911		 * mount without -o uquota/pquota/gquota options, revoke the
 912		 * quotachecked license.
 913		 */
 914		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
 915			xfs_notice(mp, "resetting quota flags");
 916			error = xfs_mount_reset_sbqflags(mp);
 917			if (error)
 918				goto out_rtunmount;
 919		}
 920	}
 921
 922	/*
 923	 * Finish recovering the file system.  This part needed to be delayed
 924	 * until after the root and real-time bitmap inodes were consistently
 925	 * read in.
 926	 */
 927	error = xfs_log_mount_finish(mp);
 928	if (error) {
 929		xfs_warn(mp, "log mount finish failed");
 930		goto out_rtunmount;
 931	}
 932
 933	/*
 934	 * Now the log is fully replayed, we can transition to full read-only
 935	 * mode for read-only mounts. This will sync all the metadata and clean
 936	 * the log so that the recovery we just performed does not have to be
 937	 * replayed again on the next mount.
 938	 *
 939	 * We use the same quiesce mechanism as the rw->ro remount, as they are
 940	 * semantically identical operations.
 941	 */
 942	if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
 943							XFS_MOUNT_RDONLY) {
 944		xfs_quiesce_attr(mp);
 945	}
 946
 947	/*
 948	 * Complete the quota initialisation, post-log-replay component.
 949	 */
 950	if (quotamount) {
 951		ASSERT(mp->m_qflags == 0);
 952		mp->m_qflags = quotaflags;
 953
 954		xfs_qm_mount_quotas(mp);
 955	}
 956
 957	/*
 958	 * Now we are mounted, reserve a small amount of unused space for
 959	 * privileged transactions. This is needed so that transaction
 960	 * space required for critical operations can dip into this pool
 961	 * when at ENOSPC. This is needed for operations like create with
 962	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
 963	 * are not allowed to use this reserved space.
 964	 *
 965	 * This may drive us straight to ENOSPC on mount, but that implies
 966	 * we were already there on the last unmount. Warn if this occurs.
 967	 */
 968	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
 969		resblks = xfs_default_resblks(mp);
 970		error = xfs_reserve_blocks(mp, &resblks, NULL);
 971		if (error)
 972			xfs_warn(mp,
 973	"Unable to allocate reserve blocks. Continuing without reserve pool.");
 974
 975		/* Recover any CoW blocks that never got remapped. */
 976		error = xfs_reflink_recover_cow(mp);
 977		if (error) {
 978			xfs_err(mp,
 979	"Error %d recovering leftover CoW allocations.", error);
 980			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 981			goto out_quota;
 982		}
 983
 984		/* Reserve AG blocks for future btree expansion. */
 985		error = xfs_fs_reserve_ag_blocks(mp);
 986		if (error && error != -ENOSPC)
 987			goto out_agresv;
 988	}
 989
 990	return 0;
 991
 992 out_agresv:
 993	xfs_fs_unreserve_ag_blocks(mp);
 994 out_quota:
 995	xfs_qm_unmount_quotas(mp);
 996 out_rtunmount:
 997	xfs_rtunmount_inodes(mp);
 998 out_rele_rip:
 999	xfs_irele(rip);
1000	/* Clean out dquots that might be in memory after quotacheck. */
1001	xfs_qm_unmount(mp);
1002	/*
1003	 * Cancel all delayed reclaim work and reclaim the inodes directly.
1004	 * We have to do this /after/ rtunmount and qm_unmount because those
1005	 * two will have scheduled delayed reclaim for the rt/quota inodes.
1006	 *
1007	 * This is slightly different from the unmountfs call sequence
1008	 * because we could be tearing down a partially set up mount.  In
1009	 * particular, if log_mount_finish fails we bail out without calling
1010	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
1011	 * quota inodes.
1012	 */
1013	cancel_delayed_work_sync(&mp->m_reclaim_work);
1014	xfs_reclaim_inodes(mp);
1015	xfs_health_unmount(mp);
1016 out_log_dealloc:
1017	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
1018	xfs_log_mount_cancel(mp);
1019 out_fail_wait:
1020	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
1021		xfs_wait_buftarg(mp->m_logdev_targp);
1022	xfs_wait_buftarg(mp->m_ddev_targp);
1023 out_free_perag:
1024	xfs_free_perag(mp);
1025 out_free_dir:
1026	xfs_da_unmount(mp);
1027 out_remove_uuid:
1028	xfs_uuid_unmount(mp);
1029 out_remove_errortag:
1030	xfs_errortag_del(mp);
1031 out_remove_error_sysfs:
1032	xfs_error_sysfs_del(mp);
1033 out_del_stats:
1034	xfs_sysfs_del(&mp->m_stats.xs_kobj);
1035 out_remove_sysfs:
1036	xfs_sysfs_del(&mp->m_kobj);
1037 out:
1038	return error;
1039}
1040
1041/*
1042 * This flushes out the inodes, dquots and the superblock, unmounts the
1043 * log and makes sure that incore structures are freed.
1044 */
1045void
1046xfs_unmountfs(
1047	struct xfs_mount	*mp)
1048{
1049	uint64_t		resblks;
1050	int			error;
1051
1052	xfs_stop_block_reaping(mp);
1053	xfs_fs_unreserve_ag_blocks(mp);
1054	xfs_qm_unmount_quotas(mp);
1055	xfs_rtunmount_inodes(mp);
1056	xfs_irele(mp->m_rootip);
1057
1058	/*
1059	 * We can potentially deadlock here if we have an inode cluster
1060	 * that has been freed but has its buffer still pinned in memory
1061	 * because the transaction is still sitting in an iclog. The stale
1062	 * inodes on that buffer will have their flush locks held until the
1063	 * transaction hits the disk and the callbacks run. The inode
1064	 * flush takes the flush lock unconditionally, and with nothing to
1065	 * push out the iclog we will never get that unlocked. Hence we
1066	 * need to force the log first.
1067	 */
1068	xfs_log_force(mp, XFS_LOG_SYNC);
1069
1070	/*
1071	 * Wait for all busy extents to be freed, including completion of
1072	 * any discard operation.
1073	 */
1074	xfs_extent_busy_wait_all(mp);
1075	flush_workqueue(xfs_discard_wq);
1076
1077	/*
1078	 * We now need to tell the world we are unmounting. This will allow
1079	 * us to detect that the filesystem is going away and we should error
1080	 * out anything that we have been retrying in the background. This will
1081	 * prevent neverending retries in AIL pushing from hanging the unmount.
1082	 */
1083	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
1084
1085	/*
1086	 * Flush all pending changes from the AIL.
1087	 */
1088	xfs_ail_push_all_sync(mp->m_ail);
1089
1090	/*
1091	 * Reclaim all inodes. At this point there should be no dirty inodes and
1092	 * none should be pinned or locked. Stop background inode reclaim here
1093	 * if it is still running.
1094	 */
1095	cancel_delayed_work_sync(&mp->m_reclaim_work);
1096	xfs_reclaim_inodes(mp);
1097	xfs_health_unmount(mp);
1098
1099	xfs_qm_unmount(mp);
1100
1101	/*
1102	 * Unreserve any blocks we have so that when we unmount we don't account
1103	 * the reserved free space as used. This is really only necessary for
1104	 * lazy superblock counting because it trusts the incore superblock
1105	 * counters to be absolutely correct on clean unmount.
1106	 *
1107	 * We don't bother correcting this elsewhere for lazy superblock
1108	 * counting because on mount of an unclean filesystem we reconstruct the
1109	 * correct counter value and this is irrelevant.
1110	 *
1111	 * For non-lazy counter filesystems, this doesn't matter at all because
1112	 * we only ever apply deltas to the superblock and hence the incore
1113	 * value does not matter.
1114	 */
1115	resblks = 0;
1116	error = xfs_reserve_blocks(mp, &resblks, NULL);
1117	if (error)
1118		xfs_warn(mp, "Unable to free reserved block pool. "
1119				"Freespace may not be correct on next mount.");
1120
1121	error = xfs_log_sbcount(mp);
1122	if (error)
1123		xfs_warn(mp, "Unable to update superblock counters. "
1124				"Freespace may not be correct on next mount.");
1125
1126
1127	xfs_log_unmount(mp);
1128	xfs_da_unmount(mp);
1129	xfs_uuid_unmount(mp);
1130
1131#if defined(DEBUG)
1132	xfs_errortag_clearall(mp);
1133#endif
1134	xfs_free_perag(mp);
1135
1136	xfs_errortag_del(mp);
1137	xfs_error_sysfs_del(mp);
1138	xfs_sysfs_del(&mp->m_stats.xs_kobj);
1139	xfs_sysfs_del(&mp->m_kobj);
1140}
1141
1142/*
1143 * Determine whether modifications can proceed. The caller specifies the minimum
1144 * freeze level for which modifications should not be allowed. This allows
1145 * certain operations to proceed while the freeze sequence is in progress, if
1146 * necessary.
1147 */
1148bool
1149xfs_fs_writable(
1150	struct xfs_mount	*mp,
1151	int			level)
1152{
1153	ASSERT(level > SB_UNFROZEN);
1154	if ((mp->m_super->s_writers.frozen >= level) ||
1155	    XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
1156		return false;
1157
1158	return true;
1159}
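/*
 * Editorial usage sketch, not part of the kernel source: callers name
 * the lowest freeze level at which their modification must be refused.
 * xfs_log_sbcount() below passes SB_FREEZE_COMPLETE so it may run while
 * a freeze is underway; an ordinary writer would use SB_FREEZE_WRITE as
 * here.  The helper name is hypothetical; assumes kernel context.
 */
static int
example_modify_metadata(
	struct xfs_mount	*mp)
{
	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return -EROFS;	/* frozen, shut down, or read-only */
	/* ... proceed with the modification ... */
	return 0;
}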
1160
1161/*
1162 * xfs_log_sbcount
1163 *
1164 * Sync the superblock counters to disk.
1165 *
1166 * Note this code can be called during the process of freezing, so we use the
1167 * transaction allocator that does not block when the transaction subsystem is
1168 * in its frozen state.
1169 */
1170int
1171xfs_log_sbcount(xfs_mount_t *mp)
1172{
1173	/* allow this to proceed during the freeze sequence... */
1174	if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
1175		return 0;
1176
1177	/*
1178	 * we don't need to do this if we are updating the superblock
1179	 * counters on every modification.
1180	 */
1181	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1182		return 0;
1183
1184	return xfs_sync_sb(mp, true);
1185}
1186
1187/*
1188 * Deltas for the block count can vary from 1 to very large, but lock contention
1189 * only occurs on frequent small block count updates such as in the delayed
1190 * allocation path for buffered writes (page-at-a-time updates). Hence we set
1191 * a large batch count (1024) to minimise global counter updates except when
1192 * we get near to ENOSPC and we have to be very accurate with our updates.
1193 */
1194#define XFS_FDBLOCKS_BATCH	1024
1195int
1196xfs_mod_fdblocks(
1197	struct xfs_mount	*mp,
1198	int64_t			delta,
1199	bool			rsvd)
1200{
1201	int64_t			lcounter;
1202	long long		res_used;
1203	s32			batch;
1204
1205	if (delta > 0) {
1206		/*
1207		 * If the reserve pool is depleted, put blocks back into it
1208		 * first. Most of the time the pool is full.
1209		 */
1210		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
1211			percpu_counter_add(&mp->m_fdblocks, delta);
1212			return 0;
1213		}
1214
1215		spin_lock(&mp->m_sb_lock);
1216		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1217
1218		if (res_used > delta) {
1219			mp->m_resblks_avail += delta;
1220		} else {
1221			delta -= res_used;
1222			mp->m_resblks_avail = mp->m_resblks;
1223			percpu_counter_add(&mp->m_fdblocks, delta);
1224		}
1225		spin_unlock(&mp->m_sb_lock);
1226		return 0;
1227	}
1228
1229	/*
1230	 * Taking blocks away, we need to be more accurate the closer we
1231	 * are to zero.
1232	 *
1233	 * If the counter has a value of less than 2 * max batch size,
1234	 * then make everything serialise as we are really close to
1235	 * ENOSPC.
1236	 */
1237	if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
1238				     XFS_FDBLOCKS_BATCH) < 0)
1239		batch = 1;
1240	else
1241		batch = XFS_FDBLOCKS_BATCH;
1242
1243	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
1244	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
1245				     XFS_FDBLOCKS_BATCH) >= 0) {
1246		/* we had space! */
1247		return 0;
1248	}
1249
1250	/*
1251	 * lock up the sb for dipping into reserves before releasing the space
1252	 * that took us to ENOSPC.
1253	 */
1254	spin_lock(&mp->m_sb_lock);
1255	percpu_counter_add(&mp->m_fdblocks, -delta);
1256	if (!rsvd)
1257		goto fdblocks_enospc;
1258
1259	lcounter = (long long)mp->m_resblks_avail + delta;
1260	if (lcounter >= 0) {
1261		mp->m_resblks_avail = lcounter;
1262		spin_unlock(&mp->m_sb_lock);
1263		return 0;
1264	}
1265	xfs_warn_once(mp,
1266"Reserve blocks depleted! Consider increasing reserve pool size.");
1267
1268fdblocks_enospc:
1269	spin_unlock(&mp->m_sb_lock);
1270	return -ENOSPC;
1271}
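/*
 * Editorial usage sketch, not part of the kernel source: consuming and
 * returning free blocks with xfs_mod_fdblocks().  A negative delta
 * allocates; rsvd == true lets a critical transaction dip into the
 * reserve pool once the ordinary count hits ENOSPC.  The helper name is
 * hypothetical; assumes kernel context.
 */
static int
example_reserve_blocks(
	struct xfs_mount	*mp,
	bool			critical)
{
	int			error;

	error = xfs_mod_fdblocks(mp, -4, critical);	/* take 4 blocks */
	if (error)
		return error;				/* -ENOSPC */

	/* ... if the operation is later abandoned, give them back ... */
	xfs_mod_fdblocks(mp, 4, false);
	return 0;
}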
1272
1273int
1274xfs_mod_frextents(
1275	struct xfs_mount	*mp,
1276	int64_t			delta)
1277{
1278	int64_t			lcounter;
1279	int			ret = 0;
1280
1281	spin_lock(&mp->m_sb_lock);
1282	lcounter = mp->m_sb.sb_frextents + delta;
1283	if (lcounter < 0)
1284		ret = -ENOSPC;
1285	else
1286		mp->m_sb.sb_frextents = lcounter;
1287	spin_unlock(&mp->m_sb_lock);
1288	return ret;
1289}
1290
1291/*
1292 * xfs_getsb() is called to obtain the buffer for the superblock.
1293 * The buffer is returned locked and read in from disk.
1294 * The buffer should be released with a call to xfs_buf_relse().
1295 */
1296struct xfs_buf *
1297xfs_getsb(
1298	struct xfs_mount	*mp)
1299{
1300	struct xfs_buf		*bp = mp->m_sb_bp;
1301
1302	xfs_buf_lock(bp);
1303	xfs_buf_hold(bp);
1304	ASSERT(bp->b_flags & XBF_DONE);
1305	return bp;
1306}
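/*
 * Editorial usage sketch, not part of the kernel source: xfs_getsb()
 * returns the cached superblock buffer locked with an extra hold, so
 * each call pairs with xfs_buf_relse() to unlock and drop the hold.
 * The helper name is hypothetical; assumes kernel context.
 */
static void
example_peek_sb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = xfs_getsb(mp);

	/* ... read fields from bp->b_addr while the buffer is locked ... */
	xfs_buf_relse(bp);
}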
1307
1308/*
1309 * Used to free the superblock along various error paths.
1310 */
1311void
1312xfs_freesb(
1313	struct xfs_mount	*mp)
1314{
1315	struct xfs_buf		*bp = mp->m_sb_bp;
1316
1317	xfs_buf_lock(bp);
1318	mp->m_sb_bp = NULL;
1319	xfs_buf_relse(bp);
1320}
1321
1322/*
1323 * If the underlying (data/log/rt) device is readonly, there are some
1324 * operations that cannot proceed.
1325 */
1326int
1327xfs_dev_is_read_only(
1328	struct xfs_mount	*mp,
1329	char			*message)
1330{
1331	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1332	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
1333	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
1334		xfs_notice(mp, "%s required on read-only device.", message);
1335		xfs_notice(mp, "write access unavailable, cannot proceed.");
1336		return -EROFS;
1337	}
1338	return 0;
1339}
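/*
 * Editorial usage sketch, not part of the kernel source: operations that
 * must write to the data, log, or realtime devices bail out early on
 * read-only media.  The helper name is hypothetical; assumes kernel
 * context.
 */
static int
example_grow_check(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_dev_is_read_only(mp, "grow");
	if (error)
		return error;	/* -EROFS */
	/* ... safe to issue writes to the devices ... */
	return 0;
}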
1340
1341/* Force the summary counters to be recalculated at next mount. */
1342void
1343xfs_force_summary_recalc(
1344	struct xfs_mount	*mp)
1345{
1346	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1347		return;
1348
1349	xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
1350}
1351
1352/*
1353 * Update the in-core delayed block counter.
1354 *
1355 * We prefer to update the counter without having to take a spinlock for every
1356 * counter update (i.e. batching).  Each change to delayed allocation
1357 * reservations can easily exceed the default percpu counter
1358 * batching, so we use a larger batch factor here.
1359 *
1360 * Note that we don't currently have any callers requiring fast summation
1361 * (e.g. percpu_counter_read) so we can use a big batch value here.
1362 */
1363#define XFS_DELALLOC_BATCH	(4096)
1364void
1365xfs_mod_delalloc(
1366	struct xfs_mount	*mp,
1367	int64_t			delta)
1368{
1369	percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
1370			XFS_DELALLOC_BATCH);
1371}
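/*
 * Editorial note with sketch, not part of the kernel source: with
 * percpu_counter_add_batch() each CPU accumulates up to the batch size
 * locally before folding into the shared count, so hot updates stay
 * lock-free while percpu_counter_read() may drift by roughly
 * batch * num_online_cpus().  When an exact value is required,
 * percpu_counter_sum() walks every CPU's local delta.  The helper name
 * is hypothetical; assumes kernel context.
 */
static s64
example_exact_delalloc(
	struct xfs_mount	*mp)
{
	return percpu_counter_sum(&mp->m_delalloc_blks);
}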