   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_sb.h"
  13#include "xfs_mount.h"
  14#include "xfs_inode.h"
  15#include "xfs_btree.h"
  16#include "xfs_bmap.h"
  17#include "xfs_alloc.h"
  18#include "xfs_fsops.h"
  19#include "xfs_trans.h"
  20#include "xfs_buf_item.h"
  21#include "xfs_log.h"
  22#include "xfs_log_priv.h"
  23#include "xfs_dir2.h"
  24#include "xfs_extfree_item.h"
  25#include "xfs_mru_cache.h"
  26#include "xfs_inode_item.h"
  27#include "xfs_icache.h"
  28#include "xfs_trace.h"
  29#include "xfs_icreate_item.h"
  30#include "xfs_filestream.h"
  31#include "xfs_quota.h"
  32#include "xfs_sysfs.h"
  33#include "xfs_ondisk.h"
  34#include "xfs_rmap_item.h"
  35#include "xfs_refcount_item.h"
  36#include "xfs_bmap_item.h"
  37#include "xfs_reflink.h"
  38#include "xfs_pwork.h"
  39#include "xfs_ag.h"
  40
  41#include <linux/magic.h>
  42#include <linux/fs_context.h>
  43#include <linux/fs_parser.h>
  44
  45static const struct super_operations xfs_super_operations;
  46
  47static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
  48#ifdef DEBUG
  49static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
  50#endif
  51
  52enum xfs_dax_mode {
  53	XFS_DAX_INODE = 0,
  54	XFS_DAX_ALWAYS = 1,
  55	XFS_DAX_NEVER = 2,
  56};
  57
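/* Translate the parsed dax= mount mode into the corresponding mount flags. */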
  58static void
  59xfs_mount_set_dax_mode(
  60	struct xfs_mount	*mp,
  61	enum xfs_dax_mode	mode)
  62{
  63	switch (mode) {
  64	case XFS_DAX_INODE:
  65		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
  66		break;
  67	case XFS_DAX_ALWAYS:
  68		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
  69		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
  70		break;
  71	case XFS_DAX_NEVER:
  72		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
  73		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
  74		break;
  75	}
  76}
  77
  78static const struct constant_table dax_param_enums[] = {
  79	{"inode",	XFS_DAX_INODE },
  80	{"always",	XFS_DAX_ALWAYS },
  81	{"never",	XFS_DAX_NEVER },
  82	{}
  83};
  84
  85/*
  86 * Table driven mount option parser.
  87 */
  88enum {
  89	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
  90	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
  91	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
  92	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
  93	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
  94	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
  95	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
  96	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
  97	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
  98};
  99
 100static const struct fs_parameter_spec xfs_fs_parameters[] = {
 101	fsparam_u32("logbufs",		Opt_logbufs),
 102	fsparam_string("logbsize",	Opt_logbsize),
 103	fsparam_string("logdev",	Opt_logdev),
 104	fsparam_string("rtdev",		Opt_rtdev),
 105	fsparam_flag("wsync",		Opt_wsync),
 106	fsparam_flag("noalign",		Opt_noalign),
 107	fsparam_flag("swalloc",		Opt_swalloc),
 108	fsparam_u32("sunit",		Opt_sunit),
 109	fsparam_u32("swidth",		Opt_swidth),
 110	fsparam_flag("nouuid",		Opt_nouuid),
 111	fsparam_flag("grpid",		Opt_grpid),
 112	fsparam_flag("nogrpid",		Opt_nogrpid),
 113	fsparam_flag("bsdgroups",	Opt_bsdgroups),
 114	fsparam_flag("sysvgroups",	Opt_sysvgroups),
 115	fsparam_string("allocsize",	Opt_allocsize),
 116	fsparam_flag("norecovery",	Opt_norecovery),
 117	fsparam_flag("inode64",		Opt_inode64),
 118	fsparam_flag("inode32",		Opt_inode32),
 119	fsparam_flag("ikeep",		Opt_ikeep),
 120	fsparam_flag("noikeep",		Opt_noikeep),
 121	fsparam_flag("largeio",		Opt_largeio),
 122	fsparam_flag("nolargeio",	Opt_nolargeio),
 123	fsparam_flag("attr2",		Opt_attr2),
 124	fsparam_flag("noattr2",		Opt_noattr2),
 125	fsparam_flag("filestreams",	Opt_filestreams),
 126	fsparam_flag("quota",		Opt_quota),
 127	fsparam_flag("noquota",		Opt_noquota),
 128	fsparam_flag("usrquota",	Opt_usrquota),
 129	fsparam_flag("grpquota",	Opt_grpquota),
 130	fsparam_flag("prjquota",	Opt_prjquota),
 131	fsparam_flag("uquota",		Opt_uquota),
 132	fsparam_flag("gquota",		Opt_gquota),
 133	fsparam_flag("pquota",		Opt_pquota),
 134	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
 135	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
 136	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
 137	fsparam_flag("qnoenforce",	Opt_qnoenforce),
 138	fsparam_flag("discard",		Opt_discard),
 139	fsparam_flag("nodiscard",	Opt_nodiscard),
 140	fsparam_flag("dax",		Opt_dax),
 141	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
 142	{}
 143};
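/*
 * Illustrative example (not part of the parser): a command line such as
 * "mount -o logbufs=8,logbsize=256k,sunit=64,swidth=256,dax=inode" is
 * resolved by fs_parse() against the table above into the Opt_* tokens
 * handled in xfs_fs_parse_param() below.
 */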
 144
 145struct proc_xfs_info {
 146	uint64_t	flag;
 147	char		*str;
 148};
 149
 150static int
 151xfs_fs_show_options(
 152	struct seq_file		*m,
 153	struct dentry		*root)
 154{
 155	static struct proc_xfs_info xfs_info_set[] = {
 156		/* the few simple ones we can get from the mount struct */
 157		{ XFS_MOUNT_IKEEP,		",ikeep" },
 158		{ XFS_MOUNT_WSYNC,		",wsync" },
 159		{ XFS_MOUNT_NOALIGN,		",noalign" },
 160		{ XFS_MOUNT_SWALLOC,		",swalloc" },
 161		{ XFS_MOUNT_NOUUID,		",nouuid" },
 162		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
 163		{ XFS_MOUNT_ATTR2,		",attr2" },
 164		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
 165		{ XFS_MOUNT_GRPID,		",grpid" },
 166		{ XFS_MOUNT_DISCARD,		",discard" },
 167		{ XFS_MOUNT_LARGEIO,		",largeio" },
 168		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
 169		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
 170		{ 0, NULL }
 171	};
 172	struct xfs_mount	*mp = XFS_M(root->d_sb);
 173	struct proc_xfs_info	*xfs_infop;
 174
 175	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 176		if (mp->m_flags & xfs_infop->flag)
 177			seq_puts(m, xfs_infop->str);
 178	}
 179
 180	seq_printf(m, ",inode%d",
 181		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
 182
 183	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
 184		seq_printf(m, ",allocsize=%dk",
 185			   (1 << mp->m_allocsize_log) >> 10);
 186
 187	if (mp->m_logbufs > 0)
 188		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
 189	if (mp->m_logbsize > 0)
 190		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
 191
 192	if (mp->m_logname)
 193		seq_show_option(m, "logdev", mp->m_logname);
 194	if (mp->m_rtname)
 195		seq_show_option(m, "rtdev", mp->m_rtname);
 196
 197	if (mp->m_dalign > 0)
 198		seq_printf(m, ",sunit=%d",
 199				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 200	if (mp->m_swidth > 0)
 201		seq_printf(m, ",swidth=%d",
 202				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 203
 204	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
 205		if (mp->m_qflags & XFS_UQUOTA_ENFD)
 206			seq_puts(m, ",usrquota");
 207		else
 208			seq_puts(m, ",uqnoenforce");
 209	}
 210
 211	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
 212		if (mp->m_qflags & XFS_PQUOTA_ENFD)
 213			seq_puts(m, ",prjquota");
 214		else
 215			seq_puts(m, ",pqnoenforce");
 216	}
 217	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
 218		if (mp->m_qflags & XFS_GQUOTA_ENFD)
 219			seq_puts(m, ",grpquota");
 220		else
 221			seq_puts(m, ",gqnoenforce");
 222	}
 223
 224	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 225		seq_puts(m, ",noquota");
 226
 227	return 0;
 228}
 229
 230/*
 231 * Set parameters for inode allocation heuristics, taking into account
 232 * filesystem size and inode32/inode64 mount options; i.e. specifically
 233 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 234 *
 235 * Inode allocation patterns are altered only if inode32 is requested
 236 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 237 * If altered, XFS_MOUNT_32BITINODES is set as well.
 238 *
 239 * An agcount independent of that in the mount structure is provided
 240 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 241 * to the potentially higher ag count.
 242 *
 243 * Returns the maximum AG index which may contain inodes.
 244 */
 245xfs_agnumber_t
 246xfs_set_inode_alloc(
 247	struct xfs_mount *mp,
 248	xfs_agnumber_t	agcount)
 249{
 250	xfs_agnumber_t	index;
 251	xfs_agnumber_t	maxagi = 0;
 252	xfs_sb_t	*sbp = &mp->m_sb;
 253	xfs_agnumber_t	max_metadata;
 254	xfs_agino_t	agino;
 255	xfs_ino_t	ino;
 256
 257	/*
 258	 * Calculate how much should be reserved for inodes to meet
 259	 * the max inode percentage.  Used only for inode32.
 260	 */
 261	if (M_IGEO(mp)->maxicount) {
 262		uint64_t	icount;
 263
 264		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 265		do_div(icount, 100);
 266		icount += sbp->sb_agblocks - 1;
 267		do_div(icount, sbp->sb_agblocks);
 268		max_metadata = icount;
 269	} else {
 270		max_metadata = agcount;
 271	}
 272
 273	/* Get the last possible inode in the filesystem */
 274	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
 275	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 276
 277	/*
 278	 * If user asked for no more than 32-bit inodes, and the fs is
 279	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
 280	 * the allocator to accommodate the request.
 281	 */
 282	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
 283		mp->m_flags |= XFS_MOUNT_32BITINODES;
 284	else
 285		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 286
 287	for (index = 0; index < agcount; index++) {
 288		struct xfs_perag	*pag;
 289
 290		ino = XFS_AGINO_TO_INO(mp, index, agino);
 291
 292		pag = xfs_perag_get(mp, index);
 293
 294		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
 295			if (ino > XFS_MAXINUMBER_32) {
 296				pag->pagi_inodeok = 0;
 297				pag->pagf_metadata = 0;
 298			} else {
 299				pag->pagi_inodeok = 1;
 300				maxagi++;
 301				if (index < max_metadata)
 302					pag->pagf_metadata = 1;
 303				else
 304					pag->pagf_metadata = 0;
 305			}
 306		} else {
 307			pag->pagi_inodeok = 1;
 308			pag->pagf_metadata = 0;
 309		}
 310
 311		xfs_perag_put(pag);
 312	}
 313
 314	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
 315}
 316
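/* Open an external (log or realtime) block device by path for this mount. */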
 317STATIC int
 318xfs_blkdev_get(
 319	xfs_mount_t		*mp,
 320	const char		*name,
 321	struct block_device	**bdevp)
 322{
 323	int			error = 0;
 324
 325	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 326				    mp);
 327	if (IS_ERR(*bdevp)) {
 328		error = PTR_ERR(*bdevp);
 329		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 330	}
 331
 332	return error;
 333}
 334
 335STATIC void
 336xfs_blkdev_put(
 337	struct block_device	*bdev)
 338{
 339	if (bdev)
 340		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 341}
 342
 343STATIC void
 344xfs_close_devices(
 345	struct xfs_mount	*mp)
 346{
 347	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
 348
 349	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 350		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
 351		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
 352
 353		xfs_free_buftarg(mp->m_logdev_targp);
 354		xfs_blkdev_put(logdev);
 355		fs_put_dax(dax_logdev);
 356	}
 357	if (mp->m_rtdev_targp) {
 358		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
 359		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
 360
 361		xfs_free_buftarg(mp->m_rtdev_targp);
 362		xfs_blkdev_put(rtdev);
 363		fs_put_dax(dax_rtdev);
 364	}
 365	xfs_free_buftarg(mp->m_ddev_targp);
 366	fs_put_dax(dax_ddev);
 367}
 368
 369/*
 370 * The file system configurations are:
 371 *	(1) device (partition) with data and internal log
 372 *	(2) logical volume with data and log subvolumes.
 373 *	(3) logical volume with data, log, and realtime subvolumes.
 374 *
 375 * We only have to handle opening the log and realtime volumes here if
 376 * they are present.  The data subvolume has already been opened by
 377 * get_sb_bdev() and is stored in sb->s_bdev.
 378 */
 379STATIC int
 380xfs_open_devices(
 381	struct xfs_mount	*mp)
 382{
 383	struct block_device	*ddev = mp->m_super->s_bdev;
 384	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
 385	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
 386	struct block_device	*logdev = NULL, *rtdev = NULL;
 387	int			error;
 388
 389	/*
 390	 * Open real time and log devices - order is important.
 391	 */
 392	if (mp->m_logname) {
 393		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
 394		if (error)
 395			goto out;
 396		dax_logdev = fs_dax_get_by_bdev(logdev);
 397	}
 398
 399	if (mp->m_rtname) {
 400		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
 401		if (error)
 402			goto out_close_logdev;
 403
 404		if (rtdev == ddev || rtdev == logdev) {
 405			xfs_warn(mp,
 406	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 407			error = -EINVAL;
 408			goto out_close_rtdev;
 409		}
 410		dax_rtdev = fs_dax_get_by_bdev(rtdev);
 411	}
 412
 413	/*
 414	 * Setup xfs_mount buffer target pointers
 415	 */
 416	error = -ENOMEM;
 417	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
 418	if (!mp->m_ddev_targp)
 419		goto out_close_rtdev;
 420
 421	if (rtdev) {
 422		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
 423		if (!mp->m_rtdev_targp)
 424			goto out_free_ddev_targ;
 425	}
 426
 427	if (logdev && logdev != ddev) {
 428		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
 429		if (!mp->m_logdev_targp)
 430			goto out_free_rtdev_targ;
 431	} else {
 432		mp->m_logdev_targp = mp->m_ddev_targp;
 433	}
 434
 435	return 0;
 436
 437 out_free_rtdev_targ:
 438	if (mp->m_rtdev_targp)
 439		xfs_free_buftarg(mp->m_rtdev_targp);
 440 out_free_ddev_targ:
 441	xfs_free_buftarg(mp->m_ddev_targp);
 442 out_close_rtdev:
 443	xfs_blkdev_put(rtdev);
 444	fs_put_dax(dax_rtdev);
 445 out_close_logdev:
 446	if (logdev && logdev != ddev) {
 447		xfs_blkdev_put(logdev);
 448		fs_put_dax(dax_logdev);
 449	}
 450 out:
 451	fs_put_dax(dax_ddev);
 452	return error;
 453}
 454
 455/*
 456 * Setup xfs_mount buffer target pointers based on superblock
 457 */
 458STATIC int
 459xfs_setup_devices(
 460	struct xfs_mount	*mp)
 461{
 462	int			error;
 463
 464	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 465	if (error)
 466		return error;
 467
 468	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 469		unsigned int	log_sector_size = BBSIZE;
 470
 471		if (xfs_sb_version_hassector(&mp->m_sb))
 472			log_sector_size = mp->m_sb.sb_logsectsize;
 473		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 474					    log_sector_size);
 475		if (error)
 476			return error;
 477	}
 478	if (mp->m_rtdev_targp) {
 479		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 480					    mp->m_sb.sb_sectsize);
 481		if (error)
 482			return error;
 483	}
 484
 485	return 0;
 486}
 487
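/*
 * Create the per-mount workqueues: buffer completion, unwritten extent
 * conversion, CIL pushing, inode reclaim, background gc and sync work.
 */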
 488STATIC int
 489xfs_init_mount_workqueues(
 490	struct xfs_mount	*mp)
 491{
 492	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
 493			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 494			1, mp->m_super->s_id);
 495	if (!mp->m_buf_workqueue)
 496		goto out;
 497
 498	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
 499			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 500			0, mp->m_super->s_id);
 501	if (!mp->m_unwritten_workqueue)
 502		goto out_destroy_buf;
 503
 504	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
 505			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
 506			0, mp->m_super->s_id);
 507	if (!mp->m_cil_workqueue)
 508		goto out_destroy_unwritten;
 509
 510	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
 511			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 512			0, mp->m_super->s_id);
 513	if (!mp->m_reclaim_workqueue)
 514		goto out_destroy_cil;
 515
 516	mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
 517			WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
 518			0, mp->m_super->s_id);
 519	if (!mp->m_gc_workqueue)
 520		goto out_destroy_reclaim;
 521
 522	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
 523			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
 524	if (!mp->m_sync_workqueue)
 525		goto out_destroy_eofb;
 526
 527	return 0;
 528
 529out_destroy_eofb:
 530	destroy_workqueue(mp->m_gc_workqueue);
 531out_destroy_reclaim:
 532	destroy_workqueue(mp->m_reclaim_workqueue);
 533out_destroy_cil:
 534	destroy_workqueue(mp->m_cil_workqueue);
 535out_destroy_unwritten:
 536	destroy_workqueue(mp->m_unwritten_workqueue);
 537out_destroy_buf:
 538	destroy_workqueue(mp->m_buf_workqueue);
 539out:
 540	return -ENOMEM;
 541}
 542
 543STATIC void
 544xfs_destroy_mount_workqueues(
 545	struct xfs_mount	*mp)
 546{
 547	destroy_workqueue(mp->m_sync_workqueue);
 548	destroy_workqueue(mp->m_gc_workqueue);
 549	destroy_workqueue(mp->m_reclaim_workqueue);
 550	destroy_workqueue(mp->m_cil_workqueue);
 551	destroy_workqueue(mp->m_unwritten_workqueue);
 552	destroy_workqueue(mp->m_buf_workqueue);
 553}
 554
 555static void
 556xfs_flush_inodes_worker(
 557	struct work_struct	*work)
 558{
 559	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
 560						   m_flush_inodes_work);
 561	struct super_block	*sb = mp->m_super;
 562
 563	if (down_read_trylock(&sb->s_umount)) {
 564		sync_inodes_sb(sb);
 565		up_read(&sb->s_umount);
 566	}
 567}
 568
 569/*
 570 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 571 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 572 * for IO to complete so that we effectively throttle multiple callers to the
 573 * rate at which IO is completing.
 574 */
 575void
 576xfs_flush_inodes(
 577	struct xfs_mount	*mp)
 578{
 579	/*
 580	 * If flush_work() returns true then that means we waited for a flush
 581	 * which was already in progress.  Don't bother running another scan.
 582	 */
 583	if (flush_work(&mp->m_flush_inodes_work))
 584		return;
 585
 586	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
 587	flush_work(&mp->m_flush_inodes_work);
 588}
 589
 590/* Catch misguided souls that try to use this interface on XFS */
 591STATIC struct inode *
 592xfs_fs_alloc_inode(
 593	struct super_block	*sb)
 594{
 595	BUG();
 596	return NULL;
 597}
 598
 599#ifdef DEBUG
 600static void
 601xfs_check_delalloc(
 602	struct xfs_inode	*ip,
 603	int			whichfork)
 604{
 605	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 606	struct xfs_bmbt_irec	got;
 607	struct xfs_iext_cursor	icur;
 608
 609	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
 610		return;
 611	do {
 612		if (isnullstartblock(got.br_startblock)) {
 613			xfs_warn(ip->i_mount,
 614	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
 615				ip->i_ino,
 616				whichfork == XFS_DATA_FORK ? "data" : "cow",
 617				got.br_startoff, got.br_blockcount);
 618		}
 619	} while (xfs_iext_next_extent(ifp, &icur, &got));
 620}
 621#else
 622#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
 623#endif
 624
 625/*
 626 * Now that the generic code is guaranteed not to be accessing
 627 * the linux inode, we can inactivate and reclaim the inode.
 628 */
 629STATIC void
 630xfs_fs_destroy_inode(
 631	struct inode		*inode)
 632{
 633	struct xfs_inode	*ip = XFS_I(inode);
 634
 635	trace_xfs_destroy_inode(ip);
 636
 637	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 638	XFS_STATS_INC(ip->i_mount, vn_rele);
 639	XFS_STATS_INC(ip->i_mount, vn_remove);
 640
 641	xfs_inactive(ip);
 642
 643	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
 644		xfs_check_delalloc(ip, XFS_DATA_FORK);
 645		xfs_check_delalloc(ip, XFS_COW_FORK);
 646		ASSERT(0);
 647	}
 648
 649	XFS_STATS_INC(ip->i_mount, vn_reclaim);
 650
 651	/*
 652	 * We should never get here with one of the reclaim flags already set.
 653	 */
 654	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
 655	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
 656
 657	/*
 658	 * We always use background reclaim here because even if the inode is
  659	 * clean, it still may be under IO and hence we have to wait for IO
 660	 * completion to occur before we can reclaim the inode. The background
 661	 * reclaim path handles this more efficiently than we can here, so
 662	 * simply let background reclaim tear down all inodes.
 663	 */
 664	xfs_inode_mark_reclaimable(ip);
 665}
 666
 667static void
 668xfs_fs_dirty_inode(
 669	struct inode			*inode,
 670	int				flag)
 671{
 672	struct xfs_inode		*ip = XFS_I(inode);
 673	struct xfs_mount		*mp = ip->i_mount;
 674	struct xfs_trans		*tp;
 675
 676	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
 677		return;
 678	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
 679		return;
 680
 681	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
 682		return;
 683	xfs_ilock(ip, XFS_ILOCK_EXCL);
 684	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 685	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 686	xfs_trans_commit(tp);
 687}
 688
 689/*
 690 * Slab object creation initialisation for the XFS inode.
 691 * This covers only the idempotent fields in the XFS inode;
 692 * all other fields need to be initialised on allocation
 693 * from the slab. This avoids the need to repeatedly initialise
  694 * fields in the xfs inode that are left in the initialised state
 695 * when freeing the inode.
 696 */
 697STATIC void
 698xfs_fs_inode_init_once(
 699	void			*inode)
 700{
 701	struct xfs_inode	*ip = inode;
 702
 703	memset(ip, 0, sizeof(struct xfs_inode));
 704
 705	/* vfs inode */
 706	inode_init_once(VFS_I(ip));
 707
 708	/* xfs inode */
 709	atomic_set(&ip->i_pincount, 0);
 710	spin_lock_init(&ip->i_flags_lock);
 711
 712	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 713		     "xfsino", ip->i_ino);
 714	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 715		     "xfsino", ip->i_ino);
 716}
 717
 718/*
 719 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 720 * serialised against cache hits here via the inode->i_lock and igrab() in
 721 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 722 * racing with us, and it avoids needing to grab a spinlock here for every inode
 723 * we drop the final reference on.
 724 */
 725STATIC int
 726xfs_fs_drop_inode(
 727	struct inode		*inode)
 728{
 729	struct xfs_inode	*ip = XFS_I(inode);
 730
 731	/*
 732	 * If this unlinked inode is in the middle of recovery, don't
 733	 * drop the inode just yet; log recovery will take care of
 734	 * that.  See the comment for this inode flag.
 735	 */
 736	if (ip->i_flags & XFS_IRECOVERY) {
 737		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
 738		return 0;
 739	}
 740
 741	return generic_drop_inode(inode);
 742}
 743
 744static void
 745xfs_mount_free(
 746	struct xfs_mount	*mp)
 747{
 748	kfree(mp->m_rtname);
 749	kfree(mp->m_logname);
 750	kmem_free(mp);
 751}
 752
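/* Called for sync(2)/syncfs(2); force the log to disk on the wait pass. */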
 753STATIC int
 754xfs_fs_sync_fs(
 755	struct super_block	*sb,
 756	int			wait)
 757{
 758	struct xfs_mount	*mp = XFS_M(sb);
 759
 760	/*
 761	 * Doing anything during the async pass would be counterproductive.
 762	 */
 763	if (!wait)
 764		return 0;
 765
 766	xfs_log_force(mp, XFS_LOG_SYNC);
 767	if (laptop_mode) {
 768		/*
 769		 * The disk must be active because we're syncing.
 770		 * We schedule log work now (now that the disk is
 771		 * active) instead of later (when it might not be).
 772		 */
 773		flush_delayed_work(&mp->m_log->l_work);
 774	}
 775
 776	return 0;
 777}
 778
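/*
 * Fill in kstatfs for statfs(2); the counters are adjusted for project
 * quota limits and, for realtime files, for the realtime device.
 */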
 779STATIC int
 780xfs_fs_statfs(
 781	struct dentry		*dentry,
 782	struct kstatfs		*statp)
 783{
 784	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
 785	xfs_sb_t		*sbp = &mp->m_sb;
 786	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 787	uint64_t		fakeinos, id;
 788	uint64_t		icount;
 789	uint64_t		ifree;
 790	uint64_t		fdblocks;
 791	xfs_extlen_t		lsize;
 792	int64_t			ffree;
 793
 794	statp->f_type = XFS_SUPER_MAGIC;
 795	statp->f_namelen = MAXNAMELEN - 1;
 796
 797	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
 798	statp->f_fsid = u64_to_fsid(id);
 799
 800	icount = percpu_counter_sum(&mp->m_icount);
 801	ifree = percpu_counter_sum(&mp->m_ifree);
 802	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
 803
 804	spin_lock(&mp->m_sb_lock);
 805	statp->f_bsize = sbp->sb_blocksize;
 806	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 807	statp->f_blocks = sbp->sb_dblocks - lsize;
 808	spin_unlock(&mp->m_sb_lock);
 809
 810	/* make sure statp->f_bfree does not underflow */
 811	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
 812	statp->f_bavail = statp->f_bfree;
 813
 814	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
 815	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
 816	if (M_IGEO(mp)->maxicount)
 817		statp->f_files = min_t(typeof(statp->f_files),
 818					statp->f_files,
 819					M_IGEO(mp)->maxicount);
 820
 821	/* If sb_icount overshot maxicount, report actual allocation */
 822	statp->f_files = max_t(typeof(statp->f_files),
 823					statp->f_files,
 824					sbp->sb_icount);
 825
 826	/* make sure statp->f_ffree does not underflow */
 827	ffree = statp->f_files - (icount - ifree);
 828	statp->f_ffree = max_t(int64_t, ffree, 0);
 829
 830
 831	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
 832	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
 833			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
 834		xfs_qm_statvfs(ip, statp);
 835
 836	if (XFS_IS_REALTIME_MOUNT(mp) &&
 837	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
 838		statp->f_blocks = sbp->sb_rblocks;
 839		statp->f_bavail = statp->f_bfree =
 840			sbp->sb_frextents * sbp->sb_rextsize;
 841	}
 842
 843	return 0;
 844}
 845
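/*
 * Stash the size of the reserved block pool and release it back to the free
 * space counters; xfs_restore_resvblks() refills the pool on thaw/remount-rw.
 */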
 846STATIC void
 847xfs_save_resvblks(struct xfs_mount *mp)
 848{
 849	uint64_t resblks = 0;
 850
 851	mp->m_resblks_save = mp->m_resblks;
 852	xfs_reserve_blocks(mp, &resblks, NULL);
 853}
 854
 855STATIC void
 856xfs_restore_resvblks(struct xfs_mount *mp)
 857{
 858	uint64_t resblks;
 859
 860	if (mp->m_resblks_save) {
 861		resblks = mp->m_resblks_save;
 862		mp->m_resblks_save = 0;
 863	} else
 864		resblks = xfs_default_resblks(mp);
 865
 866	xfs_reserve_blocks(mp, &resblks, NULL);
 867}
 868
 869/*
 870 * Second stage of a freeze. The data is already frozen so we only
 871 * need to take care of the metadata. Once that's done sync the superblock
 872 * to the log to dirty it in case of a crash while frozen. This ensures that we
 873 * will recover the unlinked inode lists on the next mount.
 874 */
 875STATIC int
 876xfs_fs_freeze(
 877	struct super_block	*sb)
 878{
 879	struct xfs_mount	*mp = XFS_M(sb);
 880	unsigned int		flags;
 881	int			ret;
 882
 883	/*
 884	 * The filesystem is now frozen far enough that memory reclaim
 885	 * cannot safely operate on the filesystem. Hence we need to
 886	 * set a GFP_NOFS context here to avoid recursion deadlocks.
 887	 */
 888	flags = memalloc_nofs_save();
 889	xfs_blockgc_stop(mp);
 890	xfs_save_resvblks(mp);
 891	ret = xfs_log_quiesce(mp);
 892	memalloc_nofs_restore(flags);
 893	return ret;
 894}
 895
 896STATIC int
 897xfs_fs_unfreeze(
 898	struct super_block	*sb)
 899{
 900	struct xfs_mount	*mp = XFS_M(sb);
 901
 902	xfs_restore_resvblks(mp);
 903	xfs_log_work_queue(mp);
 904	xfs_blockgc_start(mp);
 905	return 0;
 906}
 907
 908/*
 909 * This function fills in xfs_mount_t fields based on mount args.
 910 * Note: the superblock _has_ now been read in.
 911 */
 912STATIC int
 913xfs_finish_flags(
 914	struct xfs_mount	*mp)
 915{
 916	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
 917
 918	/* Fail a mount where the logbuf is smaller than the log stripe */
 919	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
 920		if (mp->m_logbsize <= 0 &&
 921		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
 922			mp->m_logbsize = mp->m_sb.sb_logsunit;
 923		} else if (mp->m_logbsize > 0 &&
 924			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
 925			xfs_warn(mp,
 926		"logbuf size must be greater than or equal to log stripe size");
 927			return -EINVAL;
 928		}
 929	} else {
 930		/* Fail a mount if the logbuf is larger than 32K */
 931		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
 932			xfs_warn(mp,
 933		"logbuf size for version 1 logs must be 16K or 32K");
 934			return -EINVAL;
 935		}
 936	}
 937
 938	/*
 939	 * V5 filesystems always use attr2 format for attributes.
 940	 */
 941	if (xfs_sb_version_hascrc(&mp->m_sb) &&
 942	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
 943		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
 944			     "attr2 is always enabled for V5 filesystems.");
 945		return -EINVAL;
 946	}
 947
 948	/*
 949	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
 950	 * told by noattr2 to turn it off
 951	 */
 952	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
 953	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
 954		mp->m_flags |= XFS_MOUNT_ATTR2;
 955
 956	/*
 957	 * prohibit r/w mounts of read-only filesystems
 958	 */
 959	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
 960		xfs_warn(mp,
 961			"cannot mount a read-only filesystem as read-write");
 962		return -EROFS;
 963	}
 964
 965	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
 966	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
 967	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
 968		xfs_warn(mp,
 969		  "Super block does not support project and group quota together");
 970		return -EINVAL;
 971	}
 972
 973	return 0;
 974}
 975
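/* Set up the per-cpu superblock counters (icount, ifree, fdblocks, delalloc). */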
 976static int
 977xfs_init_percpu_counters(
 978	struct xfs_mount	*mp)
 979{
 980	int		error;
 981
 982	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
 983	if (error)
 984		return -ENOMEM;
 985
 986	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
 987	if (error)
 988		goto free_icount;
 989
 990	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
 991	if (error)
 992		goto free_ifree;
 993
 994	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
 995	if (error)
 996		goto free_fdblocks;
 997
 998	return 0;
 999
1000free_fdblocks:
1001	percpu_counter_destroy(&mp->m_fdblocks);
1002free_ifree:
1003	percpu_counter_destroy(&mp->m_ifree);
1004free_icount:
1005	percpu_counter_destroy(&mp->m_icount);
1006	return -ENOMEM;
1007}
1008
1009void
1010xfs_reinit_percpu_counters(
1011	struct xfs_mount	*mp)
1012{
1013	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1014	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1015	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1016}
1017
1018static void
1019xfs_destroy_percpu_counters(
1020	struct xfs_mount	*mp)
1021{
1022	percpu_counter_destroy(&mp->m_icount);
1023	percpu_counter_destroy(&mp->m_ifree);
1024	percpu_counter_destroy(&mp->m_fdblocks);
1025	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1026	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1027	percpu_counter_destroy(&mp->m_delalloc_blks);
1028}
1029
1030static void
1031xfs_fs_put_super(
1032	struct super_block	*sb)
1033{
1034	struct xfs_mount	*mp = XFS_M(sb);
1035
1036	/* if ->fill_super failed, we have no mount to tear down */
1037	if (!sb->s_fs_info)
1038		return;
1039
1040	xfs_notice(mp, "Unmounting Filesystem");
1041	xfs_filestream_unmount(mp);
1042	xfs_unmountfs(mp);
1043
1044	xfs_freesb(mp);
1045	free_percpu(mp->m_stats.xs_stats);
1046	xfs_destroy_percpu_counters(mp);
1047	xfs_destroy_mount_workqueues(mp);
1048	xfs_close_devices(mp);
1049
1050	sb->s_fs_info = NULL;
1051	xfs_mount_free(mp);
1052}
1053
1054static long
1055xfs_fs_nr_cached_objects(
1056	struct super_block	*sb,
1057	struct shrink_control	*sc)
1058{
1059	/* Paranoia: catch incorrect calls during mount setup or teardown */
1060	if (WARN_ON_ONCE(!sb->s_fs_info))
1061		return 0;
1062	return xfs_reclaim_inodes_count(XFS_M(sb));
1063}
1064
1065static long
1066xfs_fs_free_cached_objects(
1067	struct super_block	*sb,
1068	struct shrink_control	*sc)
1069{
1070	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1071}
1072
1073static const struct super_operations xfs_super_operations = {
1074	.alloc_inode		= xfs_fs_alloc_inode,
1075	.destroy_inode		= xfs_fs_destroy_inode,
1076	.dirty_inode		= xfs_fs_dirty_inode,
1077	.drop_inode		= xfs_fs_drop_inode,
1078	.put_super		= xfs_fs_put_super,
1079	.sync_fs		= xfs_fs_sync_fs,
1080	.freeze_fs		= xfs_fs_freeze,
1081	.unfreeze_fs		= xfs_fs_unfreeze,
1082	.statfs			= xfs_fs_statfs,
1083	.show_options		= xfs_fs_show_options,
1084	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1085	.free_cached_objects	= xfs_fs_free_cached_objects,
1086};
1087
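/*
 * Parse an integer with an optional K/M/G suffix, e.g. "256k" -> 262144.
 * Used for the logbsize= and allocsize= mount options below.
 */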
1088static int
1089suffix_kstrtoint(
1090	const char	*s,
1091	unsigned int	base,
1092	int		*res)
1093{
1094	int		last, shift_left_factor = 0, _res;
1095	char		*value;
1096	int		ret = 0;
1097
1098	value = kstrdup(s, GFP_KERNEL);
1099	if (!value)
1100		return -ENOMEM;
1101
1102	last = strlen(value) - 1;
1103	if (value[last] == 'K' || value[last] == 'k') {
1104		shift_left_factor = 10;
1105		value[last] = '\0';
1106	}
1107	if (value[last] == 'M' || value[last] == 'm') {
1108		shift_left_factor = 20;
1109		value[last] = '\0';
1110	}
1111	if (value[last] == 'G' || value[last] == 'g') {
1112		shift_left_factor = 30;
1113		value[last] = '\0';
1114	}
1115
1116	if (kstrtoint(value, base, &_res))
1117		ret = -EINVAL;
1118	kfree(value);
1119	*res = _res << shift_left_factor;
1120	return ret;
1121}
1122
1123static inline void
1124xfs_fs_warn_deprecated(
1125	struct fs_context	*fc,
1126	struct fs_parameter	*param,
1127	uint64_t		flag,
1128	bool			value)
1129{
1130	/* Don't print the warning if reconfiguring and current mount point
1131	 * already had the flag set
1132	 */
1133	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1134			!!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
1135		return;
1136	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1137}
1138
1139/*
1140 * Set mount state from a mount option.
1141 *
1142 * NOTE: mp->m_super is NULL here!
1143 */
1144static int
1145xfs_fs_parse_param(
1146	struct fs_context	*fc,
1147	struct fs_parameter	*param)
1148{
1149	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1150	struct fs_parse_result	result;
1151	int			size = 0;
1152	int			opt;
1153
1154	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1155	if (opt < 0)
1156		return opt;
1157
1158	switch (opt) {
1159	case Opt_logbufs:
1160		parsing_mp->m_logbufs = result.uint_32;
1161		return 0;
1162	case Opt_logbsize:
1163		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1164			return -EINVAL;
1165		return 0;
1166	case Opt_logdev:
1167		kfree(parsing_mp->m_logname);
1168		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1169		if (!parsing_mp->m_logname)
1170			return -ENOMEM;
1171		return 0;
1172	case Opt_rtdev:
1173		kfree(parsing_mp->m_rtname);
1174		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1175		if (!parsing_mp->m_rtname)
1176			return -ENOMEM;
1177		return 0;
1178	case Opt_allocsize:
1179		if (suffix_kstrtoint(param->string, 10, &size))
1180			return -EINVAL;
1181		parsing_mp->m_allocsize_log = ffs(size) - 1;
1182		parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1183		return 0;
1184	case Opt_grpid:
1185	case Opt_bsdgroups:
1186		parsing_mp->m_flags |= XFS_MOUNT_GRPID;
1187		return 0;
1188	case Opt_nogrpid:
1189	case Opt_sysvgroups:
1190		parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
1191		return 0;
1192	case Opt_wsync:
1193		parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
1194		return 0;
1195	case Opt_norecovery:
1196		parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
1197		return 0;
1198	case Opt_noalign:
1199		parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
1200		return 0;
1201	case Opt_swalloc:
1202		parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
1203		return 0;
1204	case Opt_sunit:
1205		parsing_mp->m_dalign = result.uint_32;
1206		return 0;
1207	case Opt_swidth:
1208		parsing_mp->m_swidth = result.uint_32;
1209		return 0;
1210	case Opt_inode32:
1211		parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1212		return 0;
1213	case Opt_inode64:
1214		parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1215		return 0;
1216	case Opt_nouuid:
1217		parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
1218		return 0;
1219	case Opt_largeio:
1220		parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
1221		return 0;
1222	case Opt_nolargeio:
1223		parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1224		return 0;
1225	case Opt_filestreams:
1226		parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1227		return 0;
1228	case Opt_noquota:
1229		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1230		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1231		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1232		return 0;
1233	case Opt_quota:
1234	case Opt_uquota:
1235	case Opt_usrquota:
1236		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1237				 XFS_UQUOTA_ENFD);
1238		return 0;
1239	case Opt_qnoenforce:
1240	case Opt_uqnoenforce:
1241		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1242		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1243		return 0;
1244	case Opt_pquota:
1245	case Opt_prjquota:
1246		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1247				 XFS_PQUOTA_ENFD);
1248		return 0;
1249	case Opt_pqnoenforce:
1250		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1251		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1252		return 0;
1253	case Opt_gquota:
1254	case Opt_grpquota:
1255		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1256				 XFS_GQUOTA_ENFD);
1257		return 0;
1258	case Opt_gqnoenforce:
1259		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1260		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1261		return 0;
1262	case Opt_discard:
1263		parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
1264		return 0;
1265	case Opt_nodiscard:
1266		parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
1267		return 0;
1268#ifdef CONFIG_FS_DAX
1269	case Opt_dax:
1270		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1271		return 0;
1272	case Opt_dax_enum:
1273		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1274		return 0;
1275#endif
1276	/* Following mount options will be removed in September 2025 */
1277	case Opt_ikeep:
1278		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
1279		parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
1280		return 0;
1281	case Opt_noikeep:
1282		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
1283		parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
1284		return 0;
1285	case Opt_attr2:
1286		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
1287		parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
1288		return 0;
1289	case Opt_noattr2:
1290		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
1291		parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
1292		parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
1293		return 0;
1294	default:
1295		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1296		return -EINVAL;
1297	}
1298
1299	return 0;
1300}
1301
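/* Sanity check the combination of parsed mount options before mounting. */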
1302static int
1303xfs_fs_validate_params(
1304	struct xfs_mount	*mp)
1305{
1306	/*
1307	 * no recovery flag requires a read-only mount
1308	 */
1309	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1310	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1311		xfs_warn(mp, "no-recovery mounts must be read-only.");
1312		return -EINVAL;
1313	}
1314
1315	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1316	    (mp->m_dalign || mp->m_swidth)) {
1317		xfs_warn(mp,
1318	"sunit and swidth options incompatible with the noalign option");
1319		return -EINVAL;
1320	}
1321
1322	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1323		xfs_warn(mp, "quota support not available in this kernel.");
1324		return -EINVAL;
1325	}
1326
1327	if ((mp->m_dalign && !mp->m_swidth) ||
1328	    (!mp->m_dalign && mp->m_swidth)) {
1329		xfs_warn(mp, "sunit and swidth must be specified together");
1330		return -EINVAL;
1331	}
1332
1333	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1334		xfs_warn(mp,
1335	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1336			mp->m_swidth, mp->m_dalign);
1337		return -EINVAL;
1338	}
1339
1340	if (mp->m_logbufs != -1 &&
1341	    mp->m_logbufs != 0 &&
1342	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1343	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1344		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1345			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1346		return -EINVAL;
1347	}
1348
1349	if (mp->m_logbsize != -1 &&
1350	    mp->m_logbsize !=  0 &&
1351	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1352	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1353	     !is_power_of_2(mp->m_logbsize))) {
1354		xfs_warn(mp,
1355			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1356			mp->m_logbsize);
1357		return -EINVAL;
1358	}
1359
1360	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1361	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1362	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1363		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1364			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1365		return -EINVAL;
1366	}
1367
1368	return 0;
1369}
1370
1371static int
1372xfs_fs_fill_super(
1373	struct super_block	*sb,
1374	struct fs_context	*fc)
1375{
1376	struct xfs_mount	*mp = sb->s_fs_info;
1377	struct inode		*root;
1378	int			flags = 0, error;
1379
1380	mp->m_super = sb;
1381
1382	error = xfs_fs_validate_params(mp);
1383	if (error)
1384		goto out_free_names;
1385
1386	sb_min_blocksize(sb, BBSIZE);
1387	sb->s_xattr = xfs_xattr_handlers;
1388	sb->s_export_op = &xfs_export_operations;
1389#ifdef CONFIG_XFS_QUOTA
1390	sb->s_qcop = &xfs_quotactl_operations;
1391	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1392#endif
1393	sb->s_op = &xfs_super_operations;
1394
1395	/*
1396	 * Delay mount work if the debug hook is set. This is debug
 1397	 * instrumentation to coordinate simulation of xfs mount failures with
1398	 * VFS superblock operations
1399	 */
1400	if (xfs_globals.mount_delay) {
1401		xfs_notice(mp, "Delaying mount for %d seconds.",
1402			xfs_globals.mount_delay);
1403		msleep(xfs_globals.mount_delay * 1000);
1404	}
1405
1406	if (fc->sb_flags & SB_SILENT)
1407		flags |= XFS_MFSI_QUIET;
1408
1409	error = xfs_open_devices(mp);
1410	if (error)
1411		goto out_free_names;
1412
1413	error = xfs_init_mount_workqueues(mp);
1414	if (error)
1415		goto out_close_devices;
1416
1417	error = xfs_init_percpu_counters(mp);
1418	if (error)
1419		goto out_destroy_workqueues;
1420
1421	/* Allocate stats memory before we do operations that might use it */
1422	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1423	if (!mp->m_stats.xs_stats) {
1424		error = -ENOMEM;
1425		goto out_destroy_counters;
1426	}
1427
1428	error = xfs_readsb(mp, flags);
1429	if (error)
1430		goto out_free_stats;
1431
1432	error = xfs_finish_flags(mp);
1433	if (error)
1434		goto out_free_sb;
1435
1436	error = xfs_setup_devices(mp);
1437	if (error)
1438		goto out_free_sb;
1439
1440	/* V4 support is undergoing deprecation. */
1441	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
1442#ifdef CONFIG_XFS_SUPPORT_V4
1443		xfs_warn_once(mp,
1444	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1445#else
1446		xfs_warn(mp,
1447	"Deprecated V4 format (crc=0) not supported by kernel.");
1448		error = -EINVAL;
1449		goto out_free_sb;
1450#endif
1451	}
1452
1453	/* Filesystem claims it needs repair, so refuse the mount. */
1454	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
1455		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1456		error = -EFSCORRUPTED;
1457		goto out_free_sb;
1458	}
1459
1460	/*
1461	 * Don't touch the filesystem if a user tool thinks it owns the primary
1462	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1463	 * we don't check them at all.
1464	 */
1465	if (mp->m_sb.sb_inprogress) {
1466		xfs_warn(mp, "Offline file system operation in progress!");
1467		error = -EFSCORRUPTED;
1468		goto out_free_sb;
1469	}
1470
1471	/*
1472	 * Until this is fixed only page-sized or smaller data blocks work.
1473	 */
1474	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1475		xfs_warn(mp,
1476		"File system with blocksize %d bytes. "
1477		"Only pagesize (%ld) or less will currently work.",
1478				mp->m_sb.sb_blocksize, PAGE_SIZE);
1479		error = -ENOSYS;
1480		goto out_free_sb;
1481	}
1482
1483	/* Ensure this filesystem fits in the page cache limits */
1484	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1485	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1486		xfs_warn(mp,
1487		"file system too large to be mounted on this system.");
1488		error = -EFBIG;
1489		goto out_free_sb;
1490	}
1491
1492	/*
1493	 * XFS block mappings use 54 bits to store the logical block offset.
1494	 * This should suffice to handle the maximum file size that the VFS
1495	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1496	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1497	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1498	 * to check this assertion.
1499	 *
1500	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1501	 * maximum pagecache offset in units of fs blocks.
1502	 */
1503	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1504		xfs_warn(mp,
1505"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1506			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1507			 XFS_MAX_FILEOFF);
1508		error = -EINVAL;
1509		goto out_free_sb;
1510	}
1511
1512	error = xfs_filestream_mount(mp);
1513	if (error)
1514		goto out_free_sb;
1515
1516	/*
1517	 * we must configure the block size in the superblock before we run the
1518	 * full mount process as the mount process can lookup and cache inodes.
1519	 */
1520	sb->s_magic = XFS_SUPER_MAGIC;
1521	sb->s_blocksize = mp->m_sb.sb_blocksize;
1522	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1523	sb->s_maxbytes = MAX_LFS_FILESIZE;
1524	sb->s_max_links = XFS_MAXLINK;
1525	sb->s_time_gran = 1;
1526	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
1527		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1528		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1529	} else {
1530		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1531		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1532	}
1533	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1534	sb->s_iflags |= SB_I_CGROUPWB;
1535
1536	set_posix_acl_flag(sb);
1537
1538	/* version 5 superblocks support inode version counters. */
1539	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1540		sb->s_flags |= SB_I_VERSION;
1541
1542	if (xfs_sb_version_hasbigtime(&mp->m_sb))
1543		xfs_warn(mp,
1544 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");
1545
1546	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1547		bool rtdev_is_dax = false, datadev_is_dax;
1548
1549		xfs_warn(mp,
1550		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1551
1552		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1553			sb->s_blocksize);
1554		if (mp->m_rtdev_targp)
1555			rtdev_is_dax = bdev_dax_supported(
1556				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1557		if (!rtdev_is_dax && !datadev_is_dax) {
1558			xfs_alert(mp,
1559			"DAX unsupported by block device. Turning off DAX.");
1560			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1561		}
1562		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1563			xfs_alert(mp,
1564		"DAX and reflink cannot be used together!");
1565			error = -EINVAL;
1566			goto out_filestream_unmount;
1567		}
1568	}
1569
1570	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1571		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1572
1573		if (!blk_queue_discard(q)) {
1574			xfs_warn(mp, "mounting with \"discard\" option, but "
1575					"the device does not support discard");
1576			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1577		}
1578	}
1579
1580	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1581		if (mp->m_sb.sb_rblocks) {
1582			xfs_alert(mp,
1583	"reflink not compatible with realtime device!");
1584			error = -EINVAL;
1585			goto out_filestream_unmount;
1586		}
1587
1588		if (xfs_globals.always_cow) {
1589			xfs_info(mp, "using DEBUG-only always_cow mode.");
1590			mp->m_always_cow = true;
1591		}
1592	}
1593
1594	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1595		xfs_alert(mp,
1596	"reverse mapping btree not compatible with realtime device!");
1597		error = -EINVAL;
1598		goto out_filestream_unmount;
1599	}
1600
1601	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
1602		xfs_warn(mp,
1603 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");
1604
1605	error = xfs_mountfs(mp);
1606	if (error)
1607		goto out_filestream_unmount;
1608
1609	root = igrab(VFS_I(mp->m_rootip));
1610	if (!root) {
1611		error = -ENOENT;
1612		goto out_unmount;
1613	}
1614	sb->s_root = d_make_root(root);
1615	if (!sb->s_root) {
1616		error = -ENOMEM;
1617		goto out_unmount;
1618	}
1619
1620	return 0;
1621
1622 out_filestream_unmount:
1623	xfs_filestream_unmount(mp);
1624 out_free_sb:
1625	xfs_freesb(mp);
1626 out_free_stats:
1627	free_percpu(mp->m_stats.xs_stats);
1628 out_destroy_counters:
1629	xfs_destroy_percpu_counters(mp);
1630 out_destroy_workqueues:
1631	xfs_destroy_mount_workqueues(mp);
1632 out_close_devices:
1633	xfs_close_devices(mp);
1634 out_free_names:
1635	sb->s_fs_info = NULL;
1636	xfs_mount_free(mp);
1637	return error;
1638
1639 out_unmount:
1640	xfs_filestream_unmount(mp);
1641	xfs_unmountfs(mp);
1642	goto out_free_sb;
1643}
1644
1645static int
1646xfs_fs_get_tree(
1647	struct fs_context	*fc)
1648{
1649	return get_tree_bdev(fc, xfs_fs_fill_super);
1650}
1651
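/* Transition a read-only mount to read-write on remount. */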
1652static int
1653xfs_remount_rw(
1654	struct xfs_mount	*mp)
1655{
1656	struct xfs_sb		*sbp = &mp->m_sb;
1657	int error;
1658
1659	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1660		xfs_warn(mp,
1661			"ro->rw transition prohibited on norecovery mount");
1662		return -EINVAL;
1663	}
1664
1665	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1666	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1667		xfs_warn(mp,
1668	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1669			(sbp->sb_features_ro_compat &
1670				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1671		return -EINVAL;
1672	}
1673
1674	mp->m_flags &= ~XFS_MOUNT_RDONLY;
1675
1676	/*
1677	 * If this is the first remount to writeable state we might have some
1678	 * superblock changes to update.
1679	 */
1680	if (mp->m_update_sb) {
1681		error = xfs_sync_sb(mp, false);
1682		if (error) {
1683			xfs_warn(mp, "failed to write sb changes");
1684			return error;
1685		}
1686		mp->m_update_sb = false;
1687	}
1688
1689	/*
1690	 * Fill out the reserve pool if it is empty. Use the stashed value if
1691	 * it is non-zero, otherwise go with the default.
1692	 */
1693	xfs_restore_resvblks(mp);
1694	xfs_log_work_queue(mp);
1695
1696	/* Recover any CoW blocks that never got remapped. */
1697	error = xfs_reflink_recover_cow(mp);
1698	if (error) {
1699		xfs_err(mp,
1700			"Error %d recovering leftover CoW allocations.", error);
1701		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1702		return error;
1703	}
1704	xfs_blockgc_start(mp);
1705
1706	/* Create the per-AG metadata reservation pool .*/
1707	error = xfs_fs_reserve_ag_blocks(mp);
1708	if (error && error != -ENOSPC)
1709		return error;
1710
1711	return 0;
1712}
1713
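/* Quiesce the filesystem and transition a read-write mount to read-only. */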
1714static int
1715xfs_remount_ro(
1716	struct xfs_mount	*mp)
1717{
1718	int error;
1719
1720	/*
1721	 * Cancel background eofb scanning so it cannot race with the final
1722	 * log force+buftarg wait and deadlock the remount.
1723	 */
1724	xfs_blockgc_stop(mp);
1725
1726	/* Get rid of any leftover CoW reservations... */
1727	error = xfs_blockgc_free_space(mp, NULL);
1728	if (error) {
1729		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1730		return error;
1731	}
1732
1733	/* Free the per-AG metadata reservation pool. */
1734	error = xfs_fs_unreserve_ag_blocks(mp);
1735	if (error) {
1736		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1737		return error;
1738	}
1739
1740	/*
1741	 * Before we sync the metadata, we need to free up the reserve block
1742	 * pool so that the used block count in the superblock on disk is
 1743	 * correct at the end of the remount. Stash the current reserve pool
1744	 * size so that if we get remounted rw, we can return it to the same
1745	 * size.
1746	 */
1747	xfs_save_resvblks(mp);
1748
1749	xfs_log_clean(mp);
1750	mp->m_flags |= XFS_MOUNT_RDONLY;
1751
1752	return 0;
1753}
1754
1755/*
1756 * Logically we would return an error here to prevent users from believing
1757 * they might have changed mount options using remount which can't be changed.
1758 *
1759 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1760 * arguments in some cases so we can't blindly reject options, but have to
1761 * check for each specified option if it actually differs from the currently
1762 * set option and only reject it if that's the case.
1763 *
1764 * Until that is implemented we return success for every remount request, and
1765 * silently ignore all options that we can't actually change.
1766 */
1767static int
1768xfs_fs_reconfigure(
1769	struct fs_context *fc)
1770{
1771	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1772	struct xfs_mount        *new_mp = fc->s_fs_info;
1773	xfs_sb_t		*sbp = &mp->m_sb;
1774	int			flags = fc->sb_flags;
1775	int			error;
1776
1777	/* version 5 superblocks always support version counters. */
1778	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1779		fc->sb_flags |= SB_I_VERSION;
1780
1781	error = xfs_fs_validate_params(new_mp);
1782	if (error)
1783		return error;
1784
1785	sync_filesystem(mp->m_super);
1786
1787	/* inode32 -> inode64 */
1788	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1789	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1790		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1791		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1792	}
1793
1794	/* inode64 -> inode32 */
1795	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1796	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1797		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1798		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1799	}
1800
1801	/* ro -> rw */
1802	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1803		error = xfs_remount_rw(mp);
1804		if (error)
1805			return error;
1806	}
1807
1808	/* rw -> ro */
1809	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1810		error = xfs_remount_ro(mp);
1811		if (error)
1812			return error;
1813	}
1814
1815	return 0;
1816}
1817
1818static void xfs_fs_free(
1819	struct fs_context	*fc)
1820{
1821	struct xfs_mount	*mp = fc->s_fs_info;
1822
1823	/*
1824	 * mp is stored in the fs_context when it is initialized.
1825	 * mp is transferred to the superblock on a successful mount,
1826	 * but if an error occurs before the transfer we have to free
1827	 * it here.
1828	 */
1829	if (mp)
1830		xfs_mount_free(mp);
1831}
1832
1833static const struct fs_context_operations xfs_context_ops = {
1834	.parse_param = xfs_fs_parse_param,
1835	.get_tree    = xfs_fs_get_tree,
1836	.reconfigure = xfs_fs_reconfigure,
1837	.free        = xfs_fs_free,
1838};
1839
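/*
 * Allocate and initialise the xfs_mount that backs a new fs_context; mount
 * option parsing then fills it in before xfs_fs_fill_super() runs.
 */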
1840static int xfs_init_fs_context(
1841	struct fs_context	*fc)
1842{
1843	struct xfs_mount	*mp;
1844
1845	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1846	if (!mp)
1847		return -ENOMEM;
1848
1849	spin_lock_init(&mp->m_sb_lock);
1850	spin_lock_init(&mp->m_agirotor_lock);
1851	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1852	spin_lock_init(&mp->m_perag_lock);
1853	mutex_init(&mp->m_growlock);
1854	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1855	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1856	mp->m_kobj.kobject.kset = xfs_kset;
1857	/*
1858	 * We don't create the finobt per-ag space reservation until after log
1859	 * recovery, so we must set this to true so that an ifree transaction
1860	 * started during log recovery will not depend on space reservations
1861	 * for finobt expansion.
1862	 */
1863	mp->m_finobt_nores = true;
1864
1865	/*
1866	 * These can be overridden by the mount option parsing.
1867	 */
1868	mp->m_logbufs = -1;
1869	mp->m_logbsize = -1;
1870	mp->m_allocsize_log = 16; /* 64k */
1871
1872	/*
1873	 * Copy binary VFS mount flags we are interested in.
1874	 */
1875	if (fc->sb_flags & SB_RDONLY)
1876		mp->m_flags |= XFS_MOUNT_RDONLY;
1877	if (fc->sb_flags & SB_DIRSYNC)
1878		mp->m_flags |= XFS_MOUNT_DIRSYNC;
1879	if (fc->sb_flags & SB_SYNCHRONOUS)
1880		mp->m_flags |= XFS_MOUNT_WSYNC;
1881
1882	fc->s_fs_info = mp;
1883	fc->ops = &xfs_context_ops;
1884
1885	return 0;
1886}
1887
1888static struct file_system_type xfs_fs_type = {
1889	.owner			= THIS_MODULE,
1890	.name			= "xfs",
1891	.init_fs_context	= xfs_init_fs_context,
1892	.parameters		= xfs_fs_parameters,
1893	.kill_sb		= kill_block_super,
1894	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1895};
1896MODULE_ALIAS_FS("xfs");
1897
1898STATIC int __init
1899xfs_init_zones(void)
1900{
1901	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1902						sizeof(struct xlog_ticket),
1903						0, 0, NULL);
1904	if (!xfs_log_ticket_zone)
1905		goto out;
1906
1907	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1908					sizeof(struct xfs_extent_free_item),
1909					0, 0, NULL);
1910	if (!xfs_bmap_free_item_zone)
1911		goto out_destroy_log_ticket_zone;
1912
1913	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1914					       sizeof(struct xfs_btree_cur),
1915					       0, 0, NULL);
1916	if (!xfs_btree_cur_zone)
1917		goto out_destroy_bmap_free_item_zone;
1918
1919	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1920					      sizeof(struct xfs_da_state),
1921					      0, 0, NULL);
1922	if (!xfs_da_state_zone)
1923		goto out_destroy_btree_cur_zone;
1924
1925	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1926					   sizeof(struct xfs_ifork),
1927					   0, 0, NULL);
1928	if (!xfs_ifork_zone)
1929		goto out_destroy_da_state_zone;
1930
1931	xfs_trans_zone = kmem_cache_create("xfs_trans",
1932					   sizeof(struct xfs_trans),
1933					   0, 0, NULL);
1934	if (!xfs_trans_zone)
1935		goto out_destroy_ifork_zone;
1936
1937
1938	/*
1939	 * Buf log items allocated from this zone are sized for the maximum
1940	 * size possible under XFS.  This wastes a little bit of memory,
1941	 * but it is much faster.
1942	 */
1943	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1944					      sizeof(struct xfs_buf_log_item),
1945					      0, 0, NULL);
1946	if (!xfs_buf_item_zone)
1947		goto out_destroy_trans_zone;
1948
1949	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1950					(sizeof(struct xfs_efd_log_item) +
1951					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
1952					sizeof(struct xfs_extent)),
1953					0, 0, NULL);
1954	if (!xfs_efd_zone)
1955		goto out_destroy_buf_item_zone;
1956
1957	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1958					 (sizeof(struct xfs_efi_log_item) +
1959					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1960					 sizeof(struct xfs_extent)),
1961					 0, 0, NULL);
1962	if (!xfs_efi_zone)
1963		goto out_destroy_efd_zone;
1964
1965	xfs_inode_zone = kmem_cache_create("xfs_inode",
1966					   sizeof(struct xfs_inode), 0,
1967					   (SLAB_HWCACHE_ALIGN |
1968					    SLAB_RECLAIM_ACCOUNT |
1969					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1970					   xfs_fs_inode_init_once);
1971	if (!xfs_inode_zone)
1972		goto out_destroy_efi_zone;
1973
1974	xfs_ili_zone = kmem_cache_create("xfs_ili",
1975					 sizeof(struct xfs_inode_log_item), 0,
1976					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1977					 NULL);
1978	if (!xfs_ili_zone)
1979		goto out_destroy_inode_zone;
1980
1981	xfs_icreate_zone = kmem_cache_create("xfs_icr",
1982					     sizeof(struct xfs_icreate_item),
1983					     0, 0, NULL);
1984	if (!xfs_icreate_zone)
1985		goto out_destroy_ili_zone;
1986
1987	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1988					 sizeof(struct xfs_rud_log_item),
1989					 0, 0, NULL);
1990	if (!xfs_rud_zone)
1991		goto out_destroy_icreate_zone;
1992
1993	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
1994			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1995			0, 0, NULL);
1996	if (!xfs_rui_zone)
1997		goto out_destroy_rud_zone;
1998
1999	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
2000					 sizeof(struct xfs_cud_log_item),
2001					 0, 0, NULL);
2002	if (!xfs_cud_zone)
2003		goto out_destroy_rui_zone;
2004
2005	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
2006			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2007			0, 0, NULL);
2008	if (!xfs_cui_zone)
2009		goto out_destroy_cud_zone;
2010
2011	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
2012					 sizeof(struct xfs_bud_log_item),
2013					 0, 0, NULL);
2014	if (!xfs_bud_zone)
2015		goto out_destroy_cui_zone;
2016
2017	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
2018			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2019			0, 0, NULL);
2020	if (!xfs_bui_zone)
2021		goto out_destroy_bud_zone;
2022
2023	return 0;
2024
2025 out_destroy_bud_zone:
2026	kmem_cache_destroy(xfs_bud_zone);
2027 out_destroy_cui_zone:
2028	kmem_cache_destroy(xfs_cui_zone);
2029 out_destroy_cud_zone:
2030	kmem_cache_destroy(xfs_cud_zone);
2031 out_destroy_rui_zone:
2032	kmem_cache_destroy(xfs_rui_zone);
2033 out_destroy_rud_zone:
2034	kmem_cache_destroy(xfs_rud_zone);
2035 out_destroy_icreate_zone:
2036	kmem_cache_destroy(xfs_icreate_zone);
2037 out_destroy_ili_zone:
2038	kmem_cache_destroy(xfs_ili_zone);
2039 out_destroy_inode_zone:
2040	kmem_cache_destroy(xfs_inode_zone);
2041 out_destroy_efi_zone:
2042	kmem_cache_destroy(xfs_efi_zone);
2043 out_destroy_efd_zone:
2044	kmem_cache_destroy(xfs_efd_zone);
2045 out_destroy_buf_item_zone:
2046	kmem_cache_destroy(xfs_buf_item_zone);
2047 out_destroy_trans_zone:
2048	kmem_cache_destroy(xfs_trans_zone);
2049 out_destroy_ifork_zone:
2050	kmem_cache_destroy(xfs_ifork_zone);
2051 out_destroy_da_state_zone:
2052	kmem_cache_destroy(xfs_da_state_zone);
2053 out_destroy_btree_cur_zone:
2054	kmem_cache_destroy(xfs_btree_cur_zone);
2055 out_destroy_bmap_free_item_zone:
2056	kmem_cache_destroy(xfs_bmap_free_item_zone);
2057 out_destroy_log_ticket_zone:
2058	kmem_cache_destroy(xfs_log_ticket_zone);
2059 out:
2060	return -ENOMEM;
2061}
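A short note on the error handling above:

/*
 * Each out_destroy_* label unwinds exactly the caches created before the
 * failing kmem_cache_create() call, in reverse order of creation, so a
 * failure at any step leaves no slab cache allocated.
 */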
2062
2063STATIC void
2064xfs_destroy_zones(void)
2065{
2066	/*
2067	 * Make sure all delayed RCU free callbacks are flushed before we
2068	 * destroy the caches.
2069	 */
2070	rcu_barrier();
2071	kmem_cache_destroy(xfs_bui_zone);
2072	kmem_cache_destroy(xfs_bud_zone);
2073	kmem_cache_destroy(xfs_cui_zone);
2074	kmem_cache_destroy(xfs_cud_zone);
2075	kmem_cache_destroy(xfs_rui_zone);
2076	kmem_cache_destroy(xfs_rud_zone);
2077	kmem_cache_destroy(xfs_icreate_zone);
2078	kmem_cache_destroy(xfs_ili_zone);
2079	kmem_cache_destroy(xfs_inode_zone);
2080	kmem_cache_destroy(xfs_efi_zone);
2081	kmem_cache_destroy(xfs_efd_zone);
2082	kmem_cache_destroy(xfs_buf_item_zone);
2083	kmem_cache_destroy(xfs_trans_zone);
2084	kmem_cache_destroy(xfs_ifork_zone);
2085	kmem_cache_destroy(xfs_da_state_zone);
2086	kmem_cache_destroy(xfs_btree_cur_zone);
2087	kmem_cache_destroy(xfs_bmap_free_item_zone);
2088	kmem_cache_destroy(xfs_log_ticket_zone);
2089}
2090
2091STATIC int __init
2092xfs_init_workqueues(void)
2093{
2094	/*
2095	 * The allocation workqueue can be used in memory reclaim situations
2096	 * (writepage path), and parallelism is only limited by the number of
2097	 * AGs in all the filesystems mounted. Hence use the default large
2098	 * max_active value for this workqueue.
2099	 */
2100	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2101			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2102	if (!xfs_alloc_wq)
2103		return -ENOMEM;
2104
2105	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2106			0);
2107	if (!xfs_discard_wq)
2108		goto out_free_alloc_wq;
2109
2110	return 0;
2111out_free_alloc_wq:
2112	destroy_workqueue(xfs_alloc_wq);
2113	return -ENOMEM;
2114}
2115
2116STATIC void
2117xfs_destroy_workqueues(void)
2118{
2119	destroy_workqueue(xfs_discard_wq);
2120	destroy_workqueue(xfs_alloc_wq);
2121}
2122
2123STATIC int __init
2124init_xfs_fs(void)
2125{
2126	int			error;
2127
2128	xfs_check_ondisk_structs();
2129
2130	printk(KERN_INFO XFS_VERSION_STRING " with "
2131			 XFS_BUILD_OPTIONS " enabled\n");
2132
2133	xfs_dir_startup();
2134
2135	error = xfs_init_zones();
2136	if (error)
2137		goto out;
2138
2139	error = xfs_init_workqueues();
2140	if (error)
2141		goto out_destroy_zones;
2142
2143	error = xfs_mru_cache_init();
2144	if (error)
2145		goto out_destroy_wq;
2146
2147	error = xfs_buf_init();
2148	if (error)
2149		goto out_mru_cache_uninit;
2150
2151	error = xfs_init_procfs();
2152	if (error)
2153		goto out_buf_terminate;
2154
2155	error = xfs_sysctl_register();
2156	if (error)
2157		goto out_cleanup_procfs;
2158
2159	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2160	if (!xfs_kset) {
2161		error = -ENOMEM;
2162		goto out_sysctl_unregister;
2163	}
2164
2165	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2166
2167	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2168	if (!xfsstats.xs_stats) {
2169		error = -ENOMEM;
2170		goto out_kset_unregister;
2171	}
2172
2173	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2174			       "stats");
2175	if (error)
2176		goto out_free_stats;
2177
2178#ifdef DEBUG
2179	xfs_dbg_kobj.kobject.kset = xfs_kset;
2180	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2181	if (error)
2182		goto out_remove_stats_kobj;
2183#endif
2184
2185	error = xfs_qm_init();
2186	if (error)
2187		goto out_remove_dbg_kobj;
2188
2189	error = register_filesystem(&xfs_fs_type);
2190	if (error)
2191		goto out_qm_exit;
2192	return 0;
2193
2194 out_qm_exit:
2195	xfs_qm_exit();
2196 out_remove_dbg_kobj:
2197#ifdef DEBUG
2198	xfs_sysfs_del(&xfs_dbg_kobj);
2199 out_remove_stats_kobj:
2200#endif
2201	xfs_sysfs_del(&xfsstats.xs_kobj);
2202 out_free_stats:
2203	free_percpu(xfsstats.xs_stats);
2204 out_kset_unregister:
2205	kset_unregister(xfs_kset);
2206 out_sysctl_unregister:
2207	xfs_sysctl_unregister();
2208 out_cleanup_procfs:
2209	xfs_cleanup_procfs();
2210 out_buf_terminate:
2211	xfs_buf_terminate();
2212 out_mru_cache_uninit:
2213	xfs_mru_cache_uninit();
2214 out_destroy_wq:
2215	xfs_destroy_workqueues();
2216 out_destroy_zones:
2217	xfs_destroy_zones();
2218 out:
2219	return error;
2220}
2221
2222STATIC void __exit
2223exit_xfs_fs(void)
2224{
2225	xfs_qm_exit();
2226	unregister_filesystem(&xfs_fs_type);
2227#ifdef DEBUG
2228	xfs_sysfs_del(&xfs_dbg_kobj);
2229#endif
2230	xfs_sysfs_del(&xfsstats.xs_kobj);
2231	free_percpu(xfsstats.xs_stats);
2232	kset_unregister(xfs_kset);
2233	xfs_sysctl_unregister();
2234	xfs_cleanup_procfs();
2235	xfs_buf_terminate();
2236	xfs_mru_cache_uninit();
2237	xfs_destroy_workqueues();
2238	xfs_destroy_zones();
2239	xfs_uuid_table_free();
2240}
2241
2242module_init(init_xfs_fs);
2243module_exit(exit_xfs_fs);
2244
2245MODULE_AUTHOR("Silicon Graphics, Inc.");
2246MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2247MODULE_LICENSE("GPL");
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_sb.h"
  13#include "xfs_mount.h"
  14#include "xfs_inode.h"
  15#include "xfs_btree.h"
  16#include "xfs_bmap.h"
  17#include "xfs_alloc.h"
  18#include "xfs_fsops.h"
  19#include "xfs_trans.h"
  20#include "xfs_buf_item.h"
  21#include "xfs_log.h"
  22#include "xfs_log_priv.h"
  23#include "xfs_dir2.h"
  24#include "xfs_extfree_item.h"
  25#include "xfs_mru_cache.h"
  26#include "xfs_inode_item.h"
  27#include "xfs_icache.h"
  28#include "xfs_trace.h"
  29#include "xfs_icreate_item.h"
  30#include "xfs_filestream.h"
  31#include "xfs_quota.h"
  32#include "xfs_sysfs.h"
  33#include "xfs_ondisk.h"
  34#include "xfs_rmap_item.h"
  35#include "xfs_refcount_item.h"
  36#include "xfs_bmap_item.h"
  37#include "xfs_reflink.h"
  38#include "xfs_pwork.h"
  39#include "xfs_ag.h"
  40#include "xfs_defer.h"
  41#include "xfs_attr_item.h"
  42#include "xfs_xattr.h"
  43#include "xfs_iunlink_item.h"
  44
  45#include <linux/magic.h>
  46#include <linux/fs_context.h>
  47#include <linux/fs_parser.h>
  48
  49static const struct super_operations xfs_super_operations;
  50
  51static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
  52#ifdef DEBUG
  53static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
  54#endif
  55
  56#ifdef CONFIG_HOTPLUG_CPU
  57static LIST_HEAD(xfs_mount_list);
  58static DEFINE_SPINLOCK(xfs_mount_list_lock);
  59
  60static inline void xfs_mount_list_add(struct xfs_mount *mp)
  61{
  62	spin_lock(&xfs_mount_list_lock);
  63	list_add(&mp->m_mount_list, &xfs_mount_list);
  64	spin_unlock(&xfs_mount_list_lock);
  65}
  66
  67static inline void xfs_mount_list_del(struct xfs_mount *mp)
  68{
  69	spin_lock(&xfs_mount_list_lock);
  70	list_del(&mp->m_mount_list);
  71	spin_unlock(&xfs_mount_list_lock);
  72}
  73#else /* !CONFIG_HOTPLUG_CPU */
  74static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
  75static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
  76#endif
  77
  78enum xfs_dax_mode {
  79	XFS_DAX_INODE = 0,
  80	XFS_DAX_ALWAYS = 1,
  81	XFS_DAX_NEVER = 2,
  82};
  83
  84static void
  85xfs_mount_set_dax_mode(
  86	struct xfs_mount	*mp,
  87	enum xfs_dax_mode	mode)
  88{
  89	switch (mode) {
  90	case XFS_DAX_INODE:
  91		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
  92		break;
  93	case XFS_DAX_ALWAYS:
  94		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
  95		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
  96		break;
  97	case XFS_DAX_NEVER:
  98		mp->m_features |= XFS_FEAT_DAX_NEVER;
  99		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
 100		break;
 101	}
 102}
 103
 104static const struct constant_table dax_param_enums[] = {
 105	{"inode",	XFS_DAX_INODE },
 106	{"always",	XFS_DAX_ALWAYS },
 107	{"never",	XFS_DAX_NEVER },
 108	{}
 109};
 110
 111/*
 112 * Table driven mount option parser.
 113 */
 114enum {
 115	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
 116	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
 117	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
 118	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
 119	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
 120	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
 121	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
 122	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
 123	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
 124};
 125
 126static const struct fs_parameter_spec xfs_fs_parameters[] = {
 127	fsparam_u32("logbufs",		Opt_logbufs),
 128	fsparam_string("logbsize",	Opt_logbsize),
 129	fsparam_string("logdev",	Opt_logdev),
 130	fsparam_string("rtdev",		Opt_rtdev),
 131	fsparam_flag("wsync",		Opt_wsync),
 132	fsparam_flag("noalign",		Opt_noalign),
 133	fsparam_flag("swalloc",		Opt_swalloc),
 134	fsparam_u32("sunit",		Opt_sunit),
 135	fsparam_u32("swidth",		Opt_swidth),
 136	fsparam_flag("nouuid",		Opt_nouuid),
 137	fsparam_flag("grpid",		Opt_grpid),
 138	fsparam_flag("nogrpid",		Opt_nogrpid),
 139	fsparam_flag("bsdgroups",	Opt_bsdgroups),
 140	fsparam_flag("sysvgroups",	Opt_sysvgroups),
 141	fsparam_string("allocsize",	Opt_allocsize),
 142	fsparam_flag("norecovery",	Opt_norecovery),
 143	fsparam_flag("inode64",		Opt_inode64),
 144	fsparam_flag("inode32",		Opt_inode32),
 145	fsparam_flag("ikeep",		Opt_ikeep),
 146	fsparam_flag("noikeep",		Opt_noikeep),
 147	fsparam_flag("largeio",		Opt_largeio),
 148	fsparam_flag("nolargeio",	Opt_nolargeio),
 149	fsparam_flag("attr2",		Opt_attr2),
 150	fsparam_flag("noattr2",		Opt_noattr2),
 151	fsparam_flag("filestreams",	Opt_filestreams),
 152	fsparam_flag("quota",		Opt_quota),
 153	fsparam_flag("noquota",		Opt_noquota),
 154	fsparam_flag("usrquota",	Opt_usrquota),
 155	fsparam_flag("grpquota",	Opt_grpquota),
 156	fsparam_flag("prjquota",	Opt_prjquota),
 157	fsparam_flag("uquota",		Opt_uquota),
 158	fsparam_flag("gquota",		Opt_gquota),
 159	fsparam_flag("pquota",		Opt_pquota),
 160	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
 161	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
 162	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
 163	fsparam_flag("qnoenforce",	Opt_qnoenforce),
 164	fsparam_flag("discard",		Opt_discard),
 165	fsparam_flag("nodiscard",	Opt_nodiscard),
 166	fsparam_flag("dax",		Opt_dax),
 167	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
 168	{}
 169};
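One detail worth noting in the parameter table above:

/*
 * "dax" is deliberately registered twice: fsparam_flag() matches a bare
 * "dax" (treated as dax=always in xfs_fs_parse_param()), while
 * fsparam_enum() matches "dax=inode", "dax=always" and "dax=never" via
 * dax_param_enums.
 */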
 170
 171struct proc_xfs_info {
 172	uint64_t	flag;
 173	char		*str;
 174};
 175
 176static int
 177xfs_fs_show_options(
 178	struct seq_file		*m,
 179	struct dentry		*root)
 180{
 181	static struct proc_xfs_info xfs_info_set[] = {
 182		/* the few simple ones we can get from the mount struct */
 183		{ XFS_FEAT_IKEEP,		",ikeep" },
 184		{ XFS_FEAT_WSYNC,		",wsync" },
 185		{ XFS_FEAT_NOALIGN,		",noalign" },
 186		{ XFS_FEAT_SWALLOC,		",swalloc" },
 187		{ XFS_FEAT_NOUUID,		",nouuid" },
 188		{ XFS_FEAT_NORECOVERY,		",norecovery" },
 189		{ XFS_FEAT_ATTR2,		",attr2" },
 190		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
 191		{ XFS_FEAT_GRPID,		",grpid" },
 192		{ XFS_FEAT_DISCARD,		",discard" },
 193		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
 194		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
 195		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
 196		{ 0, NULL }
 197	};
 198	struct xfs_mount	*mp = XFS_M(root->d_sb);
 199	struct proc_xfs_info	*xfs_infop;
 200
 201	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 202		if (mp->m_features & xfs_infop->flag)
 203			seq_puts(m, xfs_infop->str);
 204	}
 205
 206	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
 207
 208	if (xfs_has_allocsize(mp))
 209		seq_printf(m, ",allocsize=%dk",
 210			   (1 << mp->m_allocsize_log) >> 10);
 211
 212	if (mp->m_logbufs > 0)
 213		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
 214	if (mp->m_logbsize > 0)
 215		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
 216
 217	if (mp->m_logname)
 218		seq_show_option(m, "logdev", mp->m_logname);
 219	if (mp->m_rtname)
 220		seq_show_option(m, "rtdev", mp->m_rtname);
 221
 222	if (mp->m_dalign > 0)
 223		seq_printf(m, ",sunit=%d",
 224				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 225	if (mp->m_swidth > 0)
 226		seq_printf(m, ",swidth=%d",
 227				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 228
 229	if (mp->m_qflags & XFS_UQUOTA_ENFD)
 230		seq_puts(m, ",usrquota");
 231	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
 232		seq_puts(m, ",uqnoenforce");
 233
 234	if (mp->m_qflags & XFS_PQUOTA_ENFD)
 235		seq_puts(m, ",prjquota");
 236	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
 237		seq_puts(m, ",pqnoenforce");
 238
 239	if (mp->m_qflags & XFS_GQUOTA_ENFD)
 240		seq_puts(m, ",grpquota");
 241	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
 242		seq_puts(m, ",gqnoenforce");
 243
 244	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 245		seq_puts(m, ",noquota");
 246
 247	return 0;
 248}
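To make the output concrete, a hypothetical /proc/mounts entry for a fairly default mount might read (device and mount point invented):

/*
 *   /dev/sdX /mnt xfs rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0
 *
 * The generic flags up to "relatime" come from the VFS; the XFS-specific
 * tail is what xfs_fs_show_options() above emits.
 */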
 249
 250/*
 251 * Set parameters for inode allocation heuristics, taking into account
 252 * filesystem size and inode32/inode64 mount options; i.e. specifically
 253 * whether or not XFS_FEAT_SMALL_INUMS is set.
 254 *
 255 * Inode allocation patterns are altered only if inode32 is requested
 256 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 257 * If altered, XFS_OPSTATE_INODE32 is set as well.
 258 *
 259 * An agcount independent of that in the mount structure is provided
 260 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 261 * to the potentially higher ag count.
 262 *
 263 * Returns the maximum AG index which may contain inodes.
 264 */
 265xfs_agnumber_t
 266xfs_set_inode_alloc(
 267	struct xfs_mount *mp,
 268	xfs_agnumber_t	agcount)
 269{
 270	xfs_agnumber_t	index;
 271	xfs_agnumber_t	maxagi = 0;
 272	xfs_sb_t	*sbp = &mp->m_sb;
 273	xfs_agnumber_t	max_metadata;
 274	xfs_agino_t	agino;
 275	xfs_ino_t	ino;
 276
 277	/*
 278	 * Calculate how much should be reserved for inodes to meet
 279	 * the max inode percentage.  Used only for inode32.
 280	 */
 281	if (M_IGEO(mp)->maxicount) {
 282		uint64_t	icount;
 283
 284		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 285		do_div(icount, 100);
 286		icount += sbp->sb_agblocks - 1;
 287		do_div(icount, sbp->sb_agblocks);
 288		max_metadata = icount;
 289	} else {
 290		max_metadata = agcount;
 291	}
 292
 293	/* Get the last possible inode in the filesystem */
 294	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
 295	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 296
 297	/*
  298	 * If the user asked for no more than 32-bit inodes, and the fs is
 299	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
 300	 * the allocator to accommodate the request.
 301	 */
 302	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
 303		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
 304	else
 305		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
 306
 307	for (index = 0; index < agcount; index++) {
 308		struct xfs_perag	*pag;
 309
 310		ino = XFS_AGINO_TO_INO(mp, index, agino);
 311
 312		pag = xfs_perag_get(mp, index);
 313
 314		if (xfs_is_inode32(mp)) {
 315			if (ino > XFS_MAXINUMBER_32) {
 316				pag->pagi_inodeok = 0;
 317				pag->pagf_metadata = 0;
 318			} else {
 319				pag->pagi_inodeok = 1;
 320				maxagi++;
 321				if (index < max_metadata)
 322					pag->pagf_metadata = 1;
 323				else
 324					pag->pagf_metadata = 0;
 325			}
 326		} else {
 327			pag->pagi_inodeok = 1;
 328			pag->pagf_metadata = 0;
 329		}
 330
 331		xfs_perag_put(pag);
 332	}
 333
 334	return xfs_is_inode32(mp) ? maxagi : agcount;
 335}
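A brief gloss on what the inode32 case above achieves (my summary):

/*
 * Example: on a filesystem large enough that inodes in the upper AGs would
 * receive numbers above XFS_MAXINUMBER_32, an inode32 mount clears
 * pagi_inodeok for those AGs, so every newly allocated inode number still
 * fits in 32 bits (for legacy applications that truncate st_ino).  Roughly
 * speaking, the lowest max_metadata AGs are also flagged metadata-preferred
 * so that file data tends to land in the higher AGs.
 */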
 336
 337static int
 338xfs_setup_dax_always(
 339	struct xfs_mount	*mp)
 340{
 341	if (!mp->m_ddev_targp->bt_daxdev &&
 342	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
 343		xfs_alert(mp,
 344			"DAX unsupported by block device. Turning off DAX.");
 345		goto disable_dax;
 346	}
 347
 348	if (mp->m_super->s_blocksize != PAGE_SIZE) {
 349		xfs_alert(mp,
 350			"DAX not supported for blocksize. Turning off DAX.");
 351		goto disable_dax;
 352	}
 353
 354	if (xfs_has_reflink(mp) &&
 355	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
 356		xfs_alert(mp,
 357			"DAX and reflink cannot work with multi-partitions!");
 358		return -EINVAL;
 359	}
 360
 361	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
 362	return 0;
 363
 364disable_dax:
 365	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
 366	return 0;
 367}
 368
 369STATIC int
 370xfs_blkdev_get(
 371	xfs_mount_t		*mp,
 372	const char		*name,
 373	struct block_device	**bdevp)
 374{
 375	int			error = 0;
 376
 377	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 378				    mp);
 379	if (IS_ERR(*bdevp)) {
 380		error = PTR_ERR(*bdevp);
 381		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 382	}
 383
 384	return error;
 385}
 386
 387STATIC void
 388xfs_blkdev_put(
 389	struct block_device	*bdev)
 390{
 391	if (bdev)
 392		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 393}
 394
 395STATIC void
 396xfs_close_devices(
 397	struct xfs_mount	*mp)
 398{
 399	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 400		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
 401
 402		xfs_free_buftarg(mp->m_logdev_targp);
 403		xfs_blkdev_put(logdev);
 404	}
 405	if (mp->m_rtdev_targp) {
 406		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
 407
 408		xfs_free_buftarg(mp->m_rtdev_targp);
 409		xfs_blkdev_put(rtdev);
 410	}
 411	xfs_free_buftarg(mp->m_ddev_targp);
 412}
 413
 414/*
 415 * The file system configurations are:
 416 *	(1) device (partition) with data and internal log
 417 *	(2) logical volume with data and log subvolumes.
 418 *	(3) logical volume with data, log, and realtime subvolumes.
 419 *
 420 * We only have to handle opening the log and realtime volumes here if
 421 * they are present.  The data subvolume has already been opened by
 422 * get_sb_bdev() and is stored in sb->s_bdev.
 423 */
 424STATIC int
 425xfs_open_devices(
 426	struct xfs_mount	*mp)
 427{
 428	struct block_device	*ddev = mp->m_super->s_bdev;
 429	struct block_device	*logdev = NULL, *rtdev = NULL;
 430	int			error;
 431
 432	/*
 433	 * Open real time and log devices - order is important.
 434	 */
 435	if (mp->m_logname) {
 436		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
 437		if (error)
 438			return error;
 439	}
 440
 441	if (mp->m_rtname) {
 442		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
 443		if (error)
 444			goto out_close_logdev;
 445
 446		if (rtdev == ddev || rtdev == logdev) {
 447			xfs_warn(mp,
 448	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 449			error = -EINVAL;
 450			goto out_close_rtdev;
 451		}
 452	}
 453
 454	/*
 455	 * Setup xfs_mount buffer target pointers
 456	 */
 457	error = -ENOMEM;
 458	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
 459	if (!mp->m_ddev_targp)
 460		goto out_close_rtdev;
 461
 462	if (rtdev) {
 463		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
 464		if (!mp->m_rtdev_targp)
 465			goto out_free_ddev_targ;
 466	}
 467
 468	if (logdev && logdev != ddev) {
 469		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
 470		if (!mp->m_logdev_targp)
 471			goto out_free_rtdev_targ;
 472	} else {
 473		mp->m_logdev_targp = mp->m_ddev_targp;
 474	}
 475
 476	return 0;
 477
 478 out_free_rtdev_targ:
 479	if (mp->m_rtdev_targp)
 480		xfs_free_buftarg(mp->m_rtdev_targp);
 481 out_free_ddev_targ:
 482	xfs_free_buftarg(mp->m_ddev_targp);
 483 out_close_rtdev:
 484	xfs_blkdev_put(rtdev);
 485 out_close_logdev:
 486	if (logdev && logdev != ddev)
 487		xfs_blkdev_put(logdev);
 488	return error;
 489}
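Tying this back to the configuration comment above, external log and realtime subvolumes are named on the mount command line (devices hypothetical):

/*
 *   mount -o logdev=/dev/sdY,rtdev=/dev/sdZ /dev/sdX /mnt
 *
 * m_logname and m_rtname carry those option strings (see Opt_logdev and
 * Opt_rtdev in xfs_fs_parse_param()); the data device /dev/sdX is the one
 * the VFS has already opened for the superblock.
 */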
 490
 491/*
 492 * Setup xfs_mount buffer target pointers based on superblock
 493 */
 494STATIC int
 495xfs_setup_devices(
 496	struct xfs_mount	*mp)
 497{
 498	int			error;
 499
 500	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 501	if (error)
 502		return error;
 503
 504	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 505		unsigned int	log_sector_size = BBSIZE;
 506
 507		if (xfs_has_sector(mp))
 508			log_sector_size = mp->m_sb.sb_logsectsize;
 509		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 510					    log_sector_size);
 511		if (error)
 512			return error;
 513	}
 514	if (mp->m_rtdev_targp) {
 515		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 516					    mp->m_sb.sb_sectsize);
 517		if (error)
 518			return error;
 519	}
 520
 521	return 0;
 522}
 523
 524STATIC int
 525xfs_init_mount_workqueues(
 526	struct xfs_mount	*mp)
 527{
 528	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
 529			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 530			1, mp->m_super->s_id);
 531	if (!mp->m_buf_workqueue)
 532		goto out;
 533
 534	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
 535			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 536			0, mp->m_super->s_id);
 537	if (!mp->m_unwritten_workqueue)
 538		goto out_destroy_buf;
 539
 540	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
 541			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 542			0, mp->m_super->s_id);
 543	if (!mp->m_reclaim_workqueue)
 544		goto out_destroy_unwritten;
 545
 546	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
 547			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
 548			0, mp->m_super->s_id);
 549	if (!mp->m_blockgc_wq)
 550		goto out_destroy_reclaim;
 551
 552	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
 553			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 554			1, mp->m_super->s_id);
 555	if (!mp->m_inodegc_wq)
 556		goto out_destroy_blockgc;
 557
 558	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
 559			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
 560	if (!mp->m_sync_workqueue)
 561		goto out_destroy_inodegc;
 562
 563	return 0;
 564
 565out_destroy_inodegc:
 566	destroy_workqueue(mp->m_inodegc_wq);
 567out_destroy_blockgc:
 568	destroy_workqueue(mp->m_blockgc_wq);
 569out_destroy_reclaim:
 570	destroy_workqueue(mp->m_reclaim_workqueue);
 571out_destroy_unwritten:
 572	destroy_workqueue(mp->m_unwritten_workqueue);
 573out_destroy_buf:
 574	destroy_workqueue(mp->m_buf_workqueue);
 575out:
 576	return -ENOMEM;
 577}
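A small note on the workqueue naming above:

/*
 * The "%s" in each name is filled with mp->m_super->s_id, so a mount of
 * /dev/sda1 ends up with workqueues named "xfs-buf/sda1", "xfs-conv/sda1"
 * and so on, which is how they show up in kworker thread names and debug
 * output (the device name here is only an example).
 */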
 578
 579STATIC void
 580xfs_destroy_mount_workqueues(
 581	struct xfs_mount	*mp)
 582{
 583	destroy_workqueue(mp->m_sync_workqueue);
 584	destroy_workqueue(mp->m_blockgc_wq);
 585	destroy_workqueue(mp->m_inodegc_wq);
 586	destroy_workqueue(mp->m_reclaim_workqueue);
 587	destroy_workqueue(mp->m_unwritten_workqueue);
 588	destroy_workqueue(mp->m_buf_workqueue);
 589}
 590
 591static void
 592xfs_flush_inodes_worker(
 593	struct work_struct	*work)
 594{
 595	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
 596						   m_flush_inodes_work);
 597	struct super_block	*sb = mp->m_super;
 598
 599	if (down_read_trylock(&sb->s_umount)) {
 600		sync_inodes_sb(sb);
 601		up_read(&sb->s_umount);
 602	}
 603}
 604
 605/*
 606 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 607 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 608 * for IO to complete so that we effectively throttle multiple callers to the
 609 * rate at which IO is completing.
 610 */
 611void
 612xfs_flush_inodes(
 613	struct xfs_mount	*mp)
 614{
 615	/*
 616	 * If flush_work() returns true then that means we waited for a flush
 617	 * which was already in progress.  Don't bother running another scan.
 618	 */
 619	if (flush_work(&mp->m_flush_inodes_work))
 620		return;
 621
 622	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
 623	flush_work(&mp->m_flush_inodes_work);
 624}
 625
 626/* Catch misguided souls that try to use this interface on XFS */
 627STATIC struct inode *
 628xfs_fs_alloc_inode(
 629	struct super_block	*sb)
 630{
 631	BUG();
 632	return NULL;
 633}
 634
 635/*
 636 * Now that the generic code is guaranteed not to be accessing
 637 * the linux inode, we can inactivate and reclaim the inode.
 638 */
 639STATIC void
 640xfs_fs_destroy_inode(
 641	struct inode		*inode)
 642{
 643	struct xfs_inode	*ip = XFS_I(inode);
 644
 645	trace_xfs_destroy_inode(ip);
 646
 647	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 648	XFS_STATS_INC(ip->i_mount, vn_rele);
 649	XFS_STATS_INC(ip->i_mount, vn_remove);
 650	xfs_inode_mark_reclaimable(ip);
 651}
 652
 653static void
 654xfs_fs_dirty_inode(
 655	struct inode			*inode,
 656	int				flags)
 657{
 658	struct xfs_inode		*ip = XFS_I(inode);
 659	struct xfs_mount		*mp = ip->i_mount;
 660	struct xfs_trans		*tp;
 661
 662	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
 663		return;
 664
 665	/*
 666	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
  667	 * and has a dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
 668	 * in flags possibly together with I_DIRTY_SYNC.
 669	 */
 670	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
 671		return;
 672
 673	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
 674		return;
 675	xfs_ilock(ip, XFS_ILOCK_EXCL);
 676	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 677	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 678	xfs_trans_commit(tp);
 679}
 680
 681/*
 682 * Slab object creation initialisation for the XFS inode.
 683 * This covers only the idempotent fields in the XFS inode;
 684 * all other fields need to be initialised on allocation
 685 * from the slab. This avoids the need to repeatedly initialise
  686 * fields in the xfs inode that are left in the initialised state
  687 * when the inode is freed.
 688 */
 689STATIC void
 690xfs_fs_inode_init_once(
 691	void			*inode)
 692{
 693	struct xfs_inode	*ip = inode;
 694
 695	memset(ip, 0, sizeof(struct xfs_inode));
 696
 697	/* vfs inode */
 698	inode_init_once(VFS_I(ip));
 699
 700	/* xfs inode */
 701	atomic_set(&ip->i_pincount, 0);
 702	spin_lock_init(&ip->i_flags_lock);
 703
 704	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 705		     "xfsino", ip->i_ino);
 706}
 707
 708/*
 709 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 710 * serialised against cache hits here via the inode->i_lock and igrab() in
 711 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 712 * racing with us, and it avoids needing to grab a spinlock here for every inode
 713 * we drop the final reference on.
 714 */
 715STATIC int
 716xfs_fs_drop_inode(
 717	struct inode		*inode)
 718{
 719	struct xfs_inode	*ip = XFS_I(inode);
 720
 721	/*
 722	 * If this unlinked inode is in the middle of recovery, don't
 723	 * drop the inode just yet; log recovery will take care of
 724	 * that.  See the comment for this inode flag.
 725	 */
 726	if (ip->i_flags & XFS_IRECOVERY) {
 727		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
 728		return 0;
 729	}
 730
 731	return generic_drop_inode(inode);
 732}
 733
 734static void
 735xfs_mount_free(
 736	struct xfs_mount	*mp)
 737{
 738	kfree(mp->m_rtname);
 739	kfree(mp->m_logname);
 740	kmem_free(mp);
 741}
 742
 743STATIC int
 744xfs_fs_sync_fs(
 745	struct super_block	*sb,
 746	int			wait)
 747{
 748	struct xfs_mount	*mp = XFS_M(sb);
 749	int			error;
 750
 751	trace_xfs_fs_sync_fs(mp, __return_address);
 752
 753	/*
 754	 * Doing anything during the async pass would be counterproductive.
 755	 */
 756	if (!wait)
 757		return 0;
 758
 759	error = xfs_log_force(mp, XFS_LOG_SYNC);
 760	if (error)
 761		return error;
 762
 763	if (laptop_mode) {
 764		/*
 765		 * The disk must be active because we're syncing.
 766		 * We schedule log work now (now that the disk is
 767		 * active) instead of later (when it might not be).
 768		 */
 769		flush_delayed_work(&mp->m_log->l_work);
 770	}
 771
 772	/*
 773	 * If we are called with page faults frozen out, it means we are about
 774	 * to freeze the transaction subsystem. Take the opportunity to shut
 775	 * down inodegc because once SB_FREEZE_FS is set it's too late to
 776	 * prevent inactivation races with freeze. The fs doesn't get called
 777	 * again by the freezing process until after SB_FREEZE_FS has been set,
 778	 * so it's now or never.  Same logic applies to speculative allocation
 779	 * garbage collection.
 780	 *
 781	 * We don't care if this is a normal syncfs call that does this or
 782	 * freeze that does this - we can run this multiple times without issue
 783	 * and we won't race with a restart because a restart can only occur
 784	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
 785	 */
 786	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
 787		xfs_inodegc_stop(mp);
 788		xfs_blockgc_stop(mp);
 789	}
 790
 791	return 0;
 792}
 793
 794STATIC int
 795xfs_fs_statfs(
 796	struct dentry		*dentry,
 797	struct kstatfs		*statp)
 798{
 799	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
 800	xfs_sb_t		*sbp = &mp->m_sb;
 801	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 802	uint64_t		fakeinos, id;
 803	uint64_t		icount;
 804	uint64_t		ifree;
 805	uint64_t		fdblocks;
 806	xfs_extlen_t		lsize;
 807	int64_t			ffree;
 808
 809	/*
 810	 * Expedite background inodegc but don't wait. We do not want to block
 811	 * here waiting hours for a billion extent file to be truncated.
 812	 */
 813	xfs_inodegc_push(mp);
 814
 815	statp->f_type = XFS_SUPER_MAGIC;
 816	statp->f_namelen = MAXNAMELEN - 1;
 817
 818	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
 819	statp->f_fsid = u64_to_fsid(id);
 820
 821	icount = percpu_counter_sum(&mp->m_icount);
 822	ifree = percpu_counter_sum(&mp->m_ifree);
 823	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
 824
 825	spin_lock(&mp->m_sb_lock);
 826	statp->f_bsize = sbp->sb_blocksize;
 827	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 828	statp->f_blocks = sbp->sb_dblocks - lsize;
 829	spin_unlock(&mp->m_sb_lock);
 830
 831	/* make sure statp->f_bfree does not underflow */
 832	statp->f_bfree = max_t(int64_t, 0,
 833				fdblocks - xfs_fdblocks_unavailable(mp));
 834	statp->f_bavail = statp->f_bfree;
 835
 836	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
 837	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
 838	if (M_IGEO(mp)->maxicount)
 839		statp->f_files = min_t(typeof(statp->f_files),
 840					statp->f_files,
 841					M_IGEO(mp)->maxicount);
 842
 843	/* If sb_icount overshot maxicount, report actual allocation */
 844	statp->f_files = max_t(typeof(statp->f_files),
 845					statp->f_files,
 846					sbp->sb_icount);
 847
 848	/* make sure statp->f_ffree does not underflow */
 849	ffree = statp->f_files - (icount - ifree);
 850	statp->f_ffree = max_t(int64_t, ffree, 0);
 851
 852
 853	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
 854	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
 855			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
 856		xfs_qm_statvfs(ip, statp);
 857
 858	if (XFS_IS_REALTIME_MOUNT(mp) &&
 859	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
 860		s64	freertx;
 861
 862		statp->f_blocks = sbp->sb_rblocks;
 863		freertx = percpu_counter_sum_positive(&mp->m_frextents);
 864		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
 865	}
 866
 867	return 0;
 868}
 869
 870STATIC void
 871xfs_save_resvblks(struct xfs_mount *mp)
 872{
 873	uint64_t resblks = 0;
 874
 875	mp->m_resblks_save = mp->m_resblks;
 876	xfs_reserve_blocks(mp, &resblks, NULL);
 877}
 878
 879STATIC void
 880xfs_restore_resvblks(struct xfs_mount *mp)
 881{
 882	uint64_t resblks;
 883
 884	if (mp->m_resblks_save) {
 885		resblks = mp->m_resblks_save;
 886		mp->m_resblks_save = 0;
 887	} else
 888		resblks = xfs_default_resblks(mp);
 889
 890	xfs_reserve_blocks(mp, &resblks, NULL);
 891}
 892
 893/*
 894 * Second stage of a freeze. The data is already frozen so we only
 895 * need to take care of the metadata. Once that's done sync the superblock
 896 * to the log to dirty it in case of a crash while frozen. This ensures that we
 897 * will recover the unlinked inode lists on the next mount.
 898 */
 899STATIC int
 900xfs_fs_freeze(
 901	struct super_block	*sb)
 902{
 903	struct xfs_mount	*mp = XFS_M(sb);
 904	unsigned int		flags;
 905	int			ret;
 906
 907	/*
 908	 * The filesystem is now frozen far enough that memory reclaim
 909	 * cannot safely operate on the filesystem. Hence we need to
 910	 * set a GFP_NOFS context here to avoid recursion deadlocks.
 911	 */
 912	flags = memalloc_nofs_save();
 913	xfs_save_resvblks(mp);
 914	ret = xfs_log_quiesce(mp);
 915	memalloc_nofs_restore(flags);
 916
 917	/*
 918	 * For read-write filesystems, we need to restart the inodegc on error
 919	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
 920	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
 921	 * here, so we can restart safely without racing with a stop in
 922	 * xfs_fs_sync_fs().
 923	 */
 924	if (ret && !xfs_is_readonly(mp)) {
 925		xfs_blockgc_start(mp);
 926		xfs_inodegc_start(mp);
 927	}
 928
 929	return ret;
 930}
 931
 932STATIC int
 933xfs_fs_unfreeze(
 934	struct super_block	*sb)
 935{
 936	struct xfs_mount	*mp = XFS_M(sb);
 937
 938	xfs_restore_resvblks(mp);
 939	xfs_log_work_queue(mp);
 940
 941	/*
 942	 * Don't reactivate the inodegc worker on a readonly filesystem because
 943	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
 944	 * worker because there are no speculative preallocations on a readonly
 945	 * filesystem.
 946	 */
 947	if (!xfs_is_readonly(mp)) {
 948		xfs_blockgc_start(mp);
 949		xfs_inodegc_start(mp);
 950	}
 951
 952	return 0;
 953}
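For orientation, these two callbacks are what filesystem freeze tooling ultimately reaches:

/*
 * freeze_fs/unfreeze_fs are called from the VFS freeze_super()/thaw_super()
 * paths, which back the FIFREEZE/FITHAW ioctls, e.g.:
 *
 *   fsfreeze -f /mnt    quiesce the log and release reserved blocks
 *   fsfreeze -u /mnt    restore reserved blocks and restart log work
 */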
 954
 955/*
 956 * This function fills in xfs_mount_t fields based on mount args.
 957 * Note: the superblock _has_ now been read in.
 958 */
 959STATIC int
 960xfs_finish_flags(
 961	struct xfs_mount	*mp)
 962{
 963	/* Fail a mount where the logbuf is smaller than the log stripe */
 964	if (xfs_has_logv2(mp)) {
 965		if (mp->m_logbsize <= 0 &&
 966		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
 967			mp->m_logbsize = mp->m_sb.sb_logsunit;
 968		} else if (mp->m_logbsize > 0 &&
 969			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
 970			xfs_warn(mp,
 971		"logbuf size must be greater than or equal to log stripe size");
 972			return -EINVAL;
 973		}
 974	} else {
 975		/* Fail a mount if the logbuf is larger than 32K */
 976		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
 977			xfs_warn(mp,
 978		"logbuf size for version 1 logs must be 16K or 32K");
 979			return -EINVAL;
 980		}
 981	}
 982
 983	/*
 984	 * V5 filesystems always use attr2 format for attributes.
 985	 */
 986	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
 987		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
 988			     "attr2 is always enabled for V5 filesystems.");
 989		return -EINVAL;
 990	}
 991
 992	/*
 993	 * prohibit r/w mounts of read-only filesystems
 994	 */
 995	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
 996		xfs_warn(mp,
 997			"cannot mount a read-only filesystem as read-write");
 998		return -EROFS;
 999	}
1000
1001	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1002	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1003	    !xfs_has_pquotino(mp)) {
1004		xfs_warn(mp,
1005		  "Super block does not support project and group quota together");
1006		return -EINVAL;
1007	}
1008
1009	return 0;
1010}
1011
1012static int
1013xfs_init_percpu_counters(
1014	struct xfs_mount	*mp)
1015{
1016	int		error;
1017
1018	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1019	if (error)
1020		return -ENOMEM;
1021
1022	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1023	if (error)
1024		goto free_icount;
1025
1026	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1027	if (error)
1028		goto free_ifree;
1029
1030	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1031	if (error)
1032		goto free_fdblocks;
1033
1034	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1035	if (error)
1036		goto free_delalloc;
1037
1038	return 0;
1039
1040free_delalloc:
1041	percpu_counter_destroy(&mp->m_delalloc_blks);
1042free_fdblocks:
1043	percpu_counter_destroy(&mp->m_fdblocks);
1044free_ifree:
1045	percpu_counter_destroy(&mp->m_ifree);
1046free_icount:
1047	percpu_counter_destroy(&mp->m_icount);
1048	return -ENOMEM;
1049}
1050
1051void
1052xfs_reinit_percpu_counters(
1053	struct xfs_mount	*mp)
1054{
1055	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1056	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1057	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1058	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1059}
1060
1061static void
1062xfs_destroy_percpu_counters(
1063	struct xfs_mount	*mp)
1064{
1065	percpu_counter_destroy(&mp->m_icount);
1066	percpu_counter_destroy(&mp->m_ifree);
1067	percpu_counter_destroy(&mp->m_fdblocks);
1068	ASSERT(xfs_is_shutdown(mp) ||
1069	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1070	percpu_counter_destroy(&mp->m_delalloc_blks);
1071	percpu_counter_destroy(&mp->m_frextents);
1072}
1073
1074static int
1075xfs_inodegc_init_percpu(
1076	struct xfs_mount	*mp)
1077{
1078	struct xfs_inodegc	*gc;
1079	int			cpu;
1080
1081	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1082	if (!mp->m_inodegc)
1083		return -ENOMEM;
1084
1085	for_each_possible_cpu(cpu) {
1086		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1087		init_llist_head(&gc->list);
1088		gc->items = 0;
1089		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1090	}
1091	return 0;
1092}
1093
1094static void
1095xfs_inodegc_free_percpu(
1096	struct xfs_mount	*mp)
1097{
1098	if (!mp->m_inodegc)
1099		return;
1100	free_percpu(mp->m_inodegc);
1101}
1102
1103static void
1104xfs_fs_put_super(
1105	struct super_block	*sb)
1106{
1107	struct xfs_mount	*mp = XFS_M(sb);
1108
1109	/* if ->fill_super failed, we have no mount to tear down */
1110	if (!sb->s_fs_info)
1111		return;
1112
1113	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1114	xfs_filestream_unmount(mp);
1115	xfs_unmountfs(mp);
1116
1117	xfs_freesb(mp);
1118	free_percpu(mp->m_stats.xs_stats);
1119	xfs_mount_list_del(mp);
1120	xfs_inodegc_free_percpu(mp);
1121	xfs_destroy_percpu_counters(mp);
1122	xfs_destroy_mount_workqueues(mp);
1123	xfs_close_devices(mp);
1124
1125	sb->s_fs_info = NULL;
1126	xfs_mount_free(mp);
1127}
1128
1129static long
1130xfs_fs_nr_cached_objects(
1131	struct super_block	*sb,
1132	struct shrink_control	*sc)
1133{
1134	/* Paranoia: catch incorrect calls during mount setup or teardown */
1135	if (WARN_ON_ONCE(!sb->s_fs_info))
1136		return 0;
1137	return xfs_reclaim_inodes_count(XFS_M(sb));
1138}
1139
1140static long
1141xfs_fs_free_cached_objects(
1142	struct super_block	*sb,
1143	struct shrink_control	*sc)
1144{
1145	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1146}
1147
1148static const struct super_operations xfs_super_operations = {
1149	.alloc_inode		= xfs_fs_alloc_inode,
1150	.destroy_inode		= xfs_fs_destroy_inode,
1151	.dirty_inode		= xfs_fs_dirty_inode,
1152	.drop_inode		= xfs_fs_drop_inode,
1153	.put_super		= xfs_fs_put_super,
1154	.sync_fs		= xfs_fs_sync_fs,
1155	.freeze_fs		= xfs_fs_freeze,
1156	.unfreeze_fs		= xfs_fs_unfreeze,
1157	.statfs			= xfs_fs_statfs,
1158	.show_options		= xfs_fs_show_options,
1159	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1160	.free_cached_objects	= xfs_fs_free_cached_objects,
1161};
1162
1163static int
1164suffix_kstrtoint(
1165	const char	*s,
1166	unsigned int	base,
1167	int		*res)
1168{
1169	int		last, shift_left_factor = 0, _res;
1170	char		*value;
1171	int		ret = 0;
1172
1173	value = kstrdup(s, GFP_KERNEL);
1174	if (!value)
1175		return -ENOMEM;
1176
1177	last = strlen(value) - 1;
1178	if (value[last] == 'K' || value[last] == 'k') {
1179		shift_left_factor = 10;
1180		value[last] = '\0';
1181	}
1182	if (value[last] == 'M' || value[last] == 'm') {
1183		shift_left_factor = 20;
1184		value[last] = '\0';
1185	}
1186	if (value[last] == 'G' || value[last] == 'g') {
1187		shift_left_factor = 30;
1188		value[last] = '\0';
1189	}
1190
1191	if (kstrtoint(value, base, &_res))
1192		ret = -EINVAL;
1193	kfree(value);
1194	*res = _res << shift_left_factor;
1195	return ret;
1196}
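A quick worked example of the suffix parsing above:

/*
 * suffix_kstrtoint("32k",  10, &res)  stores 32 << 10 == 32768 in res
 * suffix_kstrtoint("8m",   10, &res)  stores 8 << 20  == 8388608 in res
 * suffix_kstrtoint("4096", 10, &res)  stores 4096 (no suffix, no shift)
 */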
1197
1198static inline void
1199xfs_fs_warn_deprecated(
1200	struct fs_context	*fc,
1201	struct fs_parameter	*param,
1202	uint64_t		flag,
1203	bool			value)
1204{
 1205	/* Don't print the warning if reconfiguring and the current mount
 1206	 * point already had the flag set.
 1207	 */
 1208	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
 1209	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1210		return;
1211	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1212}
1213
1214/*
1215 * Set mount state from a mount option.
1216 *
1217 * NOTE: mp->m_super is NULL here!
1218 */
1219static int
1220xfs_fs_parse_param(
1221	struct fs_context	*fc,
1222	struct fs_parameter	*param)
1223{
1224	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1225	struct fs_parse_result	result;
1226	int			size = 0;
1227	int			opt;
1228
1229	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1230	if (opt < 0)
1231		return opt;
1232
1233	switch (opt) {
1234	case Opt_logbufs:
1235		parsing_mp->m_logbufs = result.uint_32;
1236		return 0;
1237	case Opt_logbsize:
1238		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1239			return -EINVAL;
1240		return 0;
1241	case Opt_logdev:
1242		kfree(parsing_mp->m_logname);
1243		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1244		if (!parsing_mp->m_logname)
1245			return -ENOMEM;
1246		return 0;
1247	case Opt_rtdev:
1248		kfree(parsing_mp->m_rtname);
1249		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1250		if (!parsing_mp->m_rtname)
1251			return -ENOMEM;
1252		return 0;
1253	case Opt_allocsize:
1254		if (suffix_kstrtoint(param->string, 10, &size))
1255			return -EINVAL;
1256		parsing_mp->m_allocsize_log = ffs(size) - 1;
1257		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1258		return 0;
1259	case Opt_grpid:
1260	case Opt_bsdgroups:
1261		parsing_mp->m_features |= XFS_FEAT_GRPID;
1262		return 0;
1263	case Opt_nogrpid:
1264	case Opt_sysvgroups:
1265		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1266		return 0;
1267	case Opt_wsync:
1268		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1269		return 0;
1270	case Opt_norecovery:
1271		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1272		return 0;
1273	case Opt_noalign:
1274		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1275		return 0;
1276	case Opt_swalloc:
1277		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1278		return 0;
1279	case Opt_sunit:
1280		parsing_mp->m_dalign = result.uint_32;
1281		return 0;
1282	case Opt_swidth:
1283		parsing_mp->m_swidth = result.uint_32;
1284		return 0;
1285	case Opt_inode32:
1286		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1287		return 0;
1288	case Opt_inode64:
1289		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1290		return 0;
1291	case Opt_nouuid:
1292		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1293		return 0;
1294	case Opt_largeio:
1295		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1296		return 0;
1297	case Opt_nolargeio:
1298		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1299		return 0;
1300	case Opt_filestreams:
1301		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1302		return 0;
1303	case Opt_noquota:
1304		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1305		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1306		return 0;
1307	case Opt_quota:
1308	case Opt_uquota:
1309	case Opt_usrquota:
1310		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1311		return 0;
1312	case Opt_qnoenforce:
1313	case Opt_uqnoenforce:
1314		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1315		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1316		return 0;
1317	case Opt_pquota:
1318	case Opt_prjquota:
1319		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1320		return 0;
1321	case Opt_pqnoenforce:
1322		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1323		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1324		return 0;
1325	case Opt_gquota:
1326	case Opt_grpquota:
1327		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1328		return 0;
1329	case Opt_gqnoenforce:
1330		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1331		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1332		return 0;
1333	case Opt_discard:
1334		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1335		return 0;
1336	case Opt_nodiscard:
1337		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1338		return 0;
1339#ifdef CONFIG_FS_DAX
1340	case Opt_dax:
1341		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1342		return 0;
1343	case Opt_dax_enum:
1344		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1345		return 0;
1346#endif
1347	/* Following mount options will be removed in September 2025 */
1348	case Opt_ikeep:
1349		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1350		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1351		return 0;
1352	case Opt_noikeep:
1353		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1354		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1355		return 0;
1356	case Opt_attr2:
1357		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1358		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1359		return 0;
1360	case Opt_noattr2:
1361		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1362		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1363		return 0;
1364	default:
1365		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1366		return -EINVAL;
1367	}
1368
1369	return 0;
1370}
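Putting the cases above together, an illustrative option string and how it is consumed (device hypothetical, CONFIG_FS_DAX assumed enabled):

/*
 *   mount -o logbufs=8,logbsize=256k,usrquota,dax=never /dev/sdX /mnt
 *
 *   logbufs=8      Opt_logbufs:   m_logbufs = 8
 *   logbsize=256k  Opt_logbsize:  suffix_kstrtoint() gives m_logbsize = 262144
 *   usrquota       Opt_usrquota:  m_qflags |= XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD
 *   dax=never      Opt_dax_enum:  xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER)
 */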
1371
1372static int
1373xfs_fs_validate_params(
1374	struct xfs_mount	*mp)
1375{
1376	/* No recovery flag requires a read-only mount */
1377	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1378		xfs_warn(mp, "no-recovery mounts must be read-only.");
1379		return -EINVAL;
1380	}
1381
1382	/*
1383	 * We have not read the superblock at this point, so only the attr2
1384	 * mount option can set the attr2 feature by this stage.
1385	 */
1386	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1387		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1388		return -EINVAL;
1389	}
1390
1391
1392	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1393		xfs_warn(mp,
1394	"sunit and swidth options incompatible with the noalign option");
1395		return -EINVAL;
1396	}
1397
1398	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1399		xfs_warn(mp, "quota support not available in this kernel.");
1400		return -EINVAL;
1401	}
1402
1403	if ((mp->m_dalign && !mp->m_swidth) ||
1404	    (!mp->m_dalign && mp->m_swidth)) {
1405		xfs_warn(mp, "sunit and swidth must be specified together");
1406		return -EINVAL;
1407	}
1408
1409	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1410		xfs_warn(mp,
1411	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1412			mp->m_swidth, mp->m_dalign);
1413		return -EINVAL;
1414	}
1415
1416	if (mp->m_logbufs != -1 &&
1417	    mp->m_logbufs != 0 &&
1418	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1419	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1420		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1421			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1422		return -EINVAL;
1423	}
1424
1425	if (mp->m_logbsize != -1 &&
1426	    mp->m_logbsize !=  0 &&
1427	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1428	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1429	     !is_power_of_2(mp->m_logbsize))) {
1430		xfs_warn(mp,
1431			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1432			mp->m_logbsize);
1433		return -EINVAL;
1434	}
1435
1436	if (xfs_has_allocsize(mp) &&
1437	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1438	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1439		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1440			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1441		return -EINVAL;
1442	}
1443
1444	return 0;
1445}
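A few option combinations the checks above reject, for illustration:

/*
 *   -o sunit=128                          sunit without swidth        -EINVAL
 *   -o sunit=128,swidth=200               swidth not a multiple       -EINVAL
 *   -o norecovery         (on a rw mount) norecovery needs read-only  -EINVAL
 *   -o logbsize=48k                       not a power of two          -EINVAL
 */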
1446
1447static int
1448xfs_fs_fill_super(
1449	struct super_block	*sb,
1450	struct fs_context	*fc)
1451{
1452	struct xfs_mount	*mp = sb->s_fs_info;
1453	struct inode		*root;
1454	int			flags = 0, error;
1455
1456	mp->m_super = sb;
1457
1458	error = xfs_fs_validate_params(mp);
1459	if (error)
1460		goto out_free_names;
1461
1462	sb_min_blocksize(sb, BBSIZE);
1463	sb->s_xattr = xfs_xattr_handlers;
1464	sb->s_export_op = &xfs_export_operations;
1465#ifdef CONFIG_XFS_QUOTA
1466	sb->s_qcop = &xfs_quotactl_operations;
1467	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1468#endif
1469	sb->s_op = &xfs_super_operations;
1470
1471	/*
1472	 * Delay mount work if the debug hook is set. This is debug
 1473	 * instrumentation to coordinate simulation of xfs mount failures with
 1474	 * VFS superblock operations.
1475	 */
1476	if (xfs_globals.mount_delay) {
1477		xfs_notice(mp, "Delaying mount for %d seconds.",
1478			xfs_globals.mount_delay);
1479		msleep(xfs_globals.mount_delay * 1000);
1480	}
1481
1482	if (fc->sb_flags & SB_SILENT)
1483		flags |= XFS_MFSI_QUIET;
1484
1485	error = xfs_open_devices(mp);
1486	if (error)
1487		goto out_free_names;
1488
1489	error = xfs_init_mount_workqueues(mp);
1490	if (error)
1491		goto out_close_devices;
1492
1493	error = xfs_init_percpu_counters(mp);
1494	if (error)
1495		goto out_destroy_workqueues;
1496
1497	error = xfs_inodegc_init_percpu(mp);
1498	if (error)
1499		goto out_destroy_counters;
1500
1501	/*
1502	 * All percpu data structures requiring cleanup when a cpu goes offline
1503	 * must be allocated before adding this @mp to the cpu-dead handler's
1504	 * mount list.
1505	 */
1506	xfs_mount_list_add(mp);
1507
1508	/* Allocate stats memory before we do operations that might use it */
1509	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1510	if (!mp->m_stats.xs_stats) {
1511		error = -ENOMEM;
1512		goto out_destroy_inodegc;
1513	}
1514
1515	error = xfs_readsb(mp, flags);
1516	if (error)
1517		goto out_free_stats;
1518
1519	error = xfs_finish_flags(mp);
1520	if (error)
1521		goto out_free_sb;
1522
1523	error = xfs_setup_devices(mp);
1524	if (error)
1525		goto out_free_sb;
1526
1527	/* V4 support is undergoing deprecation. */
1528	if (!xfs_has_crc(mp)) {
1529#ifdef CONFIG_XFS_SUPPORT_V4
1530		xfs_warn_once(mp,
1531	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1532#else
1533		xfs_warn(mp,
1534	"Deprecated V4 format (crc=0) not supported by kernel.");
1535		error = -EINVAL;
1536		goto out_free_sb;
1537#endif
1538	}
1539
1540	/* Filesystem claims it needs repair, so refuse the mount. */
1541	if (xfs_has_needsrepair(mp)) {
1542		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1543		error = -EFSCORRUPTED;
1544		goto out_free_sb;
1545	}
1546
1547	/*
1548	 * Don't touch the filesystem if a user tool thinks it owns the primary
1549	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1550	 * we don't check them at all.
1551	 */
1552	if (mp->m_sb.sb_inprogress) {
1553		xfs_warn(mp, "Offline file system operation in progress!");
1554		error = -EFSCORRUPTED;
1555		goto out_free_sb;
1556	}
1557
1558	/*
1559	 * Until this is fixed only page-sized or smaller data blocks work.
1560	 */
1561	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1562		xfs_warn(mp,
1563		"File system with blocksize %d bytes. "
1564		"Only pagesize (%ld) or less will currently work.",
1565				mp->m_sb.sb_blocksize, PAGE_SIZE);
1566		error = -ENOSYS;
1567		goto out_free_sb;
1568	}
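	/*
	 * For example, with the common 4k PAGE_SIZE a filesystem formatted
	 * with 512-byte to 4k blocks mounts fine, while a 64k-block
	 * filesystem is rejected by the check above.
	 */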
1569
1570	/* Ensure this filesystem fits in the page cache limits */
1571	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1572	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1573		xfs_warn(mp,
1574		"file system too large to be mounted on this system.");
1575		error = -EFBIG;
1576		goto out_free_sb;
1577	}
1578
1579	/*
1580	 * XFS block mappings use 54 bits to store the logical block offset.
1581	 * This should suffice to handle the maximum file size that the VFS
1582	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1583	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1584	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1585	 * to check this assertion.
1586	 *
1587	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1588	 * maximum pagecache offset in units of fs blocks.
1589	 */
1590	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1591		xfs_warn(mp,
1592"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1593			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1594			 XFS_MAX_FILEOFF);
1595		error = -EINVAL;
1596		goto out_free_sb;
1597	}
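	/*
	 * Worked example, assuming 4k blocks on a 64-bit kernel:
	 * MAX_LFS_FILESIZE is 2^63 - 1 bytes, which converts to roughly 2^51
	 * filesystem blocks, well below the roughly 2^54 block offsets the
	 * extent format can express, so the check above passes.
	 */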
1598
1599	error = xfs_filestream_mount(mp);
1600	if (error)
1601		goto out_free_sb;
1602
1603	/*
1604	 * We must configure the block size in the superblock before we run the
1605	 * full mount process, as the mount process can look up and cache inodes.
1606	 */
1607	sb->s_magic = XFS_SUPER_MAGIC;
1608	sb->s_blocksize = mp->m_sb.sb_blocksize;
1609	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1610	sb->s_maxbytes = MAX_LFS_FILESIZE;
1611	sb->s_max_links = XFS_MAXLINK;
1612	sb->s_time_gran = 1;
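	/*
	 * Classic (non-bigtime) on-disk inodes store signed 32-bit seconds,
	 * i.e. the familiar December 1901 to January 2038 range; bigtime
	 * inodes use a wider unsigned counter that pushes the limit out to
	 * roughly the year 2486.
	 */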
1613	if (xfs_has_bigtime(mp)) {
1614		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1615		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1616	} else {
1617		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1618		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1619	}
1620	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1621	sb->s_iflags |= SB_I_CGROUPWB;
1622
1623	set_posix_acl_flag(sb);
1624
1625	/* version 5 superblocks support inode version counters. */
1626	if (xfs_has_crc(mp))
1627		sb->s_flags |= SB_I_VERSION;
1628
1629	if (xfs_has_dax_always(mp)) {
1630		error = xfs_setup_dax_always(mp);
1631		if (error)
1632			goto out_filestream_unmount;
1633	}
1634
1635	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1636		xfs_warn(mp,
1637	"mounting with \"discard\" option, but the device does not support discard");
1638		mp->m_features &= ~XFS_FEAT_DISCARD;
1639	}
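	/*
	 * Note that a "discard" option the device cannot honour is only
	 * downgraded to a warning; the mount carries on without online
	 * discard instead of failing.
	 */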
1640
1641	if (xfs_has_reflink(mp)) {
1642		if (mp->m_sb.sb_rblocks) {
1643			xfs_alert(mp,
1644	"reflink not compatible with realtime device!");
1645			error = -EINVAL;
1646			goto out_filestream_unmount;
1647		}
1648
1649		if (xfs_globals.always_cow) {
1650			xfs_info(mp, "using DEBUG-only always_cow mode.");
1651			mp->m_always_cow = true;
1652		}
1653	}
1654
1655	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1656		xfs_alert(mp,
1657	"reverse mapping btree not compatible with realtime device!");
1658		error = -EINVAL;
1659		goto out_filestream_unmount;
1660	}
1661
1662	if (xfs_has_large_extent_counts(mp))
1663		xfs_warn(mp,
1664	"EXPERIMENTAL Large extent counts feature in use. Use at your own risk!");
1665
1666	error = xfs_mountfs(mp);
1667	if (error)
1668		goto out_filestream_unmount;
1669
1670	root = igrab(VFS_I(mp->m_rootip));
1671	if (!root) {
1672		error = -ENOENT;
1673		goto out_unmount;
1674	}
1675	sb->s_root = d_make_root(root);
1676	if (!sb->s_root) {
1677		error = -ENOMEM;
1678		goto out_unmount;
1679	}
1680
1681	return 0;
1682
1683 out_filestream_unmount:
1684	xfs_filestream_unmount(mp);
1685 out_free_sb:
1686	xfs_freesb(mp);
1687 out_free_stats:
1688	free_percpu(mp->m_stats.xs_stats);
1689 out_destroy_inodegc:
1690	xfs_mount_list_del(mp);
1691	xfs_inodegc_free_percpu(mp);
1692 out_destroy_counters:
1693	xfs_destroy_percpu_counters(mp);
1694 out_destroy_workqueues:
1695	xfs_destroy_mount_workqueues(mp);
1696 out_close_devices:
1697	xfs_close_devices(mp);
1698 out_free_names:
1699	sb->s_fs_info = NULL;
1700	xfs_mount_free(mp);
1701	return error;
1702
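 /*
  * Failures after xfs_mountfs() has succeeded take this path so that a full
  * xfs_unmountfs() runs before rejoining the common unwinding above.
  */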
1703 out_unmount:
1704	xfs_filestream_unmount(mp);
1705	xfs_unmountfs(mp);
1706	goto out_free_sb;
1707}
1708
1709static int
1710xfs_fs_get_tree(
1711	struct fs_context	*fc)
1712{
1713	return get_tree_bdev(fc, xfs_fs_fill_super);
1714}
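/*
 * Under the new mount API the sequence is: xfs_init_fs_context() allocates
 * the xfs_mount, xfs_fs_parse_param() fills it from the supplied options,
 * and get_tree_bdev() opens the block device named by the mount source and,
 * if this is a fresh superblock for that device, calls xfs_fs_fill_super().
 */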
1715
1716static int
1717xfs_remount_rw(
1718	struct xfs_mount	*mp)
1719{
1720	struct xfs_sb		*sbp = &mp->m_sb;
1721	int error;
1722
1723	if (xfs_has_norecovery(mp)) {
1724		xfs_warn(mp,
1725			"ro->rw transition prohibited on norecovery mount");
1726		return -EINVAL;
1727	}
1728
1729	if (xfs_sb_is_v5(sbp) &&
1730	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1731		xfs_warn(mp,
1732	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1733			(sbp->sb_features_ro_compat &
1734				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1735		return -EINVAL;
1736	}
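	/*
	 * Unknown ro-compat feature bits are, by definition, safe to read but
	 * not safe to write, hence the refusal to go writable above.
	 */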
1737
1738	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1739
1740	/*
1741	 * If this is the first remount to writeable state we might have some
1742	 * superblock changes to update.
1743	 */
1744	if (mp->m_update_sb) {
1745		error = xfs_sync_sb(mp, false);
1746		if (error) {
1747			xfs_warn(mp, "failed to write sb changes");
1748			return error;
1749		}
1750		mp->m_update_sb = false;
1751	}
1752
1753	/*
1754	 * Fill out the reserve pool if it is empty. Use the stashed value if
1755	 * it is non-zero, otherwise go with the default.
1756	 */
1757	xfs_restore_resvblks(mp);
1758	xfs_log_work_queue(mp);
1759	xfs_blockgc_start(mp);
1760
1761	/* Create the per-AG metadata reservation pool. */
1762	error = xfs_fs_reserve_ag_blocks(mp);
1763	if (error && error != -ENOSPC)
1764		return error;
1765
1766	/* Re-enable the background inode inactivation worker. */
1767	xfs_inodegc_start(mp);
1768
1769	return 0;
1770}
1771
1772static int
1773xfs_remount_ro(
1774	struct xfs_mount	*mp)
1775{
1776	struct xfs_icwalk	icw = {
1777		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1778	};
1779	int			error;
1780
1781	/* Flush all the dirty data to disk. */
1782	error = sync_filesystem(mp->m_super);
1783	if (error)
1784		return error;
1785
1786	/*
1787	 * Cancel background eofb scanning so it cannot race with the final
1788	 * log force+buftarg wait and deadlock the remount.
1789	 */
1790	xfs_blockgc_stop(mp);
1791
1792	/*
1793	 * Clear out all remaining COW staging extents and speculative post-EOF
1794	 * preallocations so that we don't leave inodes requiring inactivation
1795	 * cleanups during reclaim on a read-only mount.  We must process every
1796	 * cached inode, so this requires a synchronous cache scan.
1797	 */
1798	error = xfs_blockgc_free_space(mp, &icw);
1799	if (error) {
1800		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1801		return error;
1802	}
1803
1804	/*
1805	 * Stop the inodegc background worker.  All pending inodegc work was
1806	 * already flushed when we synced the filesystem above.
1807	 * The VFS holds s_umount, so we know that inodes cannot enter
1808	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1809	 * we send inodes straight to reclaim, so no inodes will be queued.
1810	 */
1811	xfs_inodegc_stop(mp);
1812
1813	/* Free the per-AG metadata reservation pool. */
1814	error = xfs_fs_unreserve_ag_blocks(mp);
1815	if (error) {
1816		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1817		return error;
1818	}
1819
1820	/*
1821	 * Before we sync the metadata, we need to free up the reserve block
1822	 * pool so that the used block count in the superblock on disk is
1823	 * correct at the end of the remount. Stash the current reserve pool
1824	 * size so that if we get remounted rw, we can return it to the same
1825	 * size.
1826	 */
1827	xfs_save_resvblks(mp);
1828
1829	xfs_log_clean(mp);
1830	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1831
1832	return 0;
1833}
1834
1835/*
1836 * Logically we would return an error here to prevent users from believing
1837 * they might have changed mount options using remount which can't be changed.
1838 *
1839 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1840 * arguments in some cases so we can't blindly reject options, but have to
1841	 * check, for each specified option, whether it actually differs from the currently
1842 * set option and only reject it if that's the case.
1843 *
1844 * Until that is implemented we return success for every remount request, and
1845 * silently ignore all options that we can't actually change.
1846 */
1847static int
1848xfs_fs_reconfigure(
1849	struct fs_context *fc)
1850{
1851	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1852	struct xfs_mount	*new_mp = fc->s_fs_info;
1853	int			flags = fc->sb_flags;
1854	int			error;
1855
1856	/* version 5 superblocks always support version counters. */
1857	if (xfs_has_crc(mp))
1858		fc->sb_flags |= SB_I_VERSION;
1859
1860	error = xfs_fs_validate_params(new_mp);
1861	if (error)
1862		return error;
1863
1864	/* inode32 -> inode64 */
1865	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1866		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1867		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1868	}
1869
1870	/* inode64 -> inode32 */
1871	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1872		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1873		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1874	}
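	/*
	 * "inode32" (XFS_FEAT_SMALL_INUMS) restricts new inodes to allocation
	 * groups whose inode numbers fit in 32 bits; xfs_set_inode_alloc()
	 * recomputes the set of eligible AGs whenever the flag flips.
	 */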
1875
1876	/* ro -> rw */
1877	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1878		error = xfs_remount_rw(mp);
1879		if (error)
1880			return error;
1881	}
1882
1883	/* rw -> ro */
1884	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1885		error = xfs_remount_ro(mp);
1886		if (error)
1887			return error;
1888	}
1889
1890	return 0;
1891}
1892
1893static void xfs_fs_free(
1894	struct fs_context	*fc)
1895{
1896	struct xfs_mount	*mp = fc->s_fs_info;
1897
1898	/*
1899	 * mp is stored in the fs_context when it is initialized.
1900	 * mp is transferred to the superblock on a successful mount,
1901	 * but if an error occurs before the transfer we have to free
1902	 * it here.
1903	 */
1904	if (mp)
1905		xfs_mount_free(mp);
1906}
1907
1908static const struct fs_context_operations xfs_context_ops = {
1909	.parse_param = xfs_fs_parse_param,
1910	.get_tree    = xfs_fs_get_tree,
1911	.reconfigure = xfs_fs_reconfigure,
1912	.free        = xfs_fs_free,
1913};
1914
1915static int xfs_init_fs_context(
1916	struct fs_context	*fc)
1917{
1918	struct xfs_mount	*mp;
1919
1920	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1921	if (!mp)
1922		return -ENOMEM;
1923
1924	spin_lock_init(&mp->m_sb_lock);
1925	spin_lock_init(&mp->m_agirotor_lock);
1926	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1927	spin_lock_init(&mp->m_perag_lock);
1928	mutex_init(&mp->m_growlock);
1929	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1930	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1931	mp->m_kobj.kobject.kset = xfs_kset;
1932	/*
1933	 * We don't create the finobt per-ag space reservation until after log
1934	 * recovery, so we must set this to true so that an ifree transaction
1935	 * started during log recovery will not depend on space reservations
1936	 * for finobt expansion.
1937	 */
1938	mp->m_finobt_nores = true;
1939
1940	/*
1941	 * These can be overridden by the mount option parsing.
1942	 */
1943	mp->m_logbufs = -1;
1944	mp->m_logbsize = -1;
1945	mp->m_allocsize_log = 16; /* 64k */
1946
1947	/*
1948	 * Copy binary VFS mount flags we are interested in.
1949	 */
1950	if (fc->sb_flags & SB_RDONLY)
1951		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1952	if (fc->sb_flags & SB_DIRSYNC)
1953		mp->m_features |= XFS_FEAT_DIRSYNC;
1954	if (fc->sb_flags & SB_SYNCHRONOUS)
1955		mp->m_features |= XFS_FEAT_WSYNC;
1956
1957	fc->s_fs_info = mp;
1958	fc->ops = &xfs_context_ops;
1959
1960	return 0;
1961}
1962
1963static struct file_system_type xfs_fs_type = {
1964	.owner			= THIS_MODULE,
1965	.name			= "xfs",
1966	.init_fs_context	= xfs_init_fs_context,
1967	.parameters		= xfs_fs_parameters,
1968	.kill_sb		= kill_block_super,
1969	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1970};
1971MODULE_ALIAS_FS("xfs");
1972
1973STATIC int __init
1974xfs_init_caches(void)
1975{
1976	int		error;
1977
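	/*
	 * The caches below are created in order; on any failure we jump to
	 * the matching label at the bottom, which destroys everything created
	 * so far in reverse order and returns -ENOMEM.
	 */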
1978	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
1979					 SLAB_HWCACHE_ALIGN |
1980					 SLAB_RECLAIM_ACCOUNT |
1981					 SLAB_MEM_SPREAD,
1982					 NULL);
1983	if (!xfs_buf_cache)
1984		goto out;
1985
1986	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
1987						sizeof(struct xlog_ticket),
1988						0, 0, NULL);
1989	if (!xfs_log_ticket_cache)
1990		goto out_destroy_buf_cache;
1991
1992	error = xfs_btree_init_cur_caches();
1993	if (error)
1994		goto out_destroy_log_ticket_cache;
1995
1996	error = xfs_defer_init_item_caches();
1997	if (error)
1998		goto out_destroy_btree_cur_cache;
1999
2000	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2001					      sizeof(struct xfs_da_state),
2002					      0, 0, NULL);
2003	if (!xfs_da_state_cache)
2004		goto out_destroy_defer_item_cache;
2005
2006	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2007					   sizeof(struct xfs_ifork),
2008					   0, 0, NULL);
2009	if (!xfs_ifork_cache)
2010		goto out_destroy_da_state_cache;
2011
2012	xfs_trans_cache = kmem_cache_create("xfs_trans",
2013					   sizeof(struct xfs_trans),
2014					   0, 0, NULL);
2015	if (!xfs_trans_cache)
2016		goto out_destroy_ifork_cache;
2017
2018
2019	/*
2020	 * The size of the cache-allocated buf log item is the maximum
2021	 * size possible under XFS.  This wastes a little bit of memory,
2022	 * but it is much faster.
2023	 */
2024	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2025					      sizeof(struct xfs_buf_log_item),
2026					      0, 0, NULL);
2027	if (!xfs_buf_item_cache)
2028		goto out_destroy_trans_cache;
2029
2030	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2031			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2032			0, 0, NULL);
2033	if (!xfs_efd_cache)
2034		goto out_destroy_buf_item_cache;
2035
2036	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2037			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2038			0, 0, NULL);
2039	if (!xfs_efi_cache)
2040		goto out_destroy_efd_cache;
2041
2042	xfs_inode_cache = kmem_cache_create("xfs_inode",
2043					   sizeof(struct xfs_inode), 0,
2044					   (SLAB_HWCACHE_ALIGN |
2045					    SLAB_RECLAIM_ACCOUNT |
2046					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2047					   xfs_fs_inode_init_once);
2048	if (!xfs_inode_cache)
2049		goto out_destroy_efi_cache;
2050
2051	xfs_ili_cache = kmem_cache_create("xfs_ili",
2052					 sizeof(struct xfs_inode_log_item), 0,
2053					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2054					 NULL);
2055	if (!xfs_ili_cache)
2056		goto out_destroy_inode_cache;
2057
2058	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2059					     sizeof(struct xfs_icreate_item),
2060					     0, 0, NULL);
2061	if (!xfs_icreate_cache)
2062		goto out_destroy_ili_cache;
2063
2064	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2065					 sizeof(struct xfs_rud_log_item),
2066					 0, 0, NULL);
2067	if (!xfs_rud_cache)
2068		goto out_destroy_icreate_cache;
2069
2070	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2071			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2072			0, 0, NULL);
2073	if (!xfs_rui_cache)
2074		goto out_destroy_rud_cache;
2075
2076	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2077					 sizeof(struct xfs_cud_log_item),
2078					 0, 0, NULL);
2079	if (!xfs_cud_cache)
2080		goto out_destroy_rui_cache;
2081
2082	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2083			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2084			0, 0, NULL);
2085	if (!xfs_cui_cache)
2086		goto out_destroy_cud_cache;
2087
2088	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2089					 sizeof(struct xfs_bud_log_item),
2090					 0, 0, NULL);
2091	if (!xfs_bud_cache)
2092		goto out_destroy_cui_cache;
2093
2094	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2095			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2096			0, 0, NULL);
2097	if (!xfs_bui_cache)
2098		goto out_destroy_bud_cache;
2099
2100	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2101					    sizeof(struct xfs_attrd_log_item),
2102					    0, 0, NULL);
2103	if (!xfs_attrd_cache)
2104		goto out_destroy_bui_cache;
2105
2106	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2107					    sizeof(struct xfs_attri_log_item),
2108					    0, 0, NULL);
2109	if (!xfs_attri_cache)
2110		goto out_destroy_attrd_cache;
2111
2112	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2113					     sizeof(struct xfs_iunlink_item),
2114					     0, 0, NULL);
2115	if (!xfs_iunlink_cache)
2116		goto out_destroy_attri_cache;
2117
2118	return 0;
2119
2120 out_destroy_attri_cache:
2121	kmem_cache_destroy(xfs_attri_cache);
2122 out_destroy_attrd_cache:
2123	kmem_cache_destroy(xfs_attrd_cache);
2124 out_destroy_bui_cache:
2125	kmem_cache_destroy(xfs_bui_cache);
2126 out_destroy_bud_cache:
2127	kmem_cache_destroy(xfs_bud_cache);
2128 out_destroy_cui_cache:
2129	kmem_cache_destroy(xfs_cui_cache);
2130 out_destroy_cud_cache:
2131	kmem_cache_destroy(xfs_cud_cache);
2132 out_destroy_rui_cache:
2133	kmem_cache_destroy(xfs_rui_cache);
2134 out_destroy_rud_cache:
2135	kmem_cache_destroy(xfs_rud_cache);
2136 out_destroy_icreate_cache:
2137	kmem_cache_destroy(xfs_icreate_cache);
2138 out_destroy_ili_cache:
2139	kmem_cache_destroy(xfs_ili_cache);
2140 out_destroy_inode_cache:
2141	kmem_cache_destroy(xfs_inode_cache);
2142 out_destroy_efi_cache:
2143	kmem_cache_destroy(xfs_efi_cache);
2144 out_destroy_efd_cache:
2145	kmem_cache_destroy(xfs_efd_cache);
2146 out_destroy_buf_item_cache:
2147	kmem_cache_destroy(xfs_buf_item_cache);
2148 out_destroy_trans_cache:
2149	kmem_cache_destroy(xfs_trans_cache);
2150 out_destroy_ifork_cache:
2151	kmem_cache_destroy(xfs_ifork_cache);
2152 out_destroy_da_state_cache:
2153	kmem_cache_destroy(xfs_da_state_cache);
2154 out_destroy_defer_item_cache:
2155	xfs_defer_destroy_item_caches();
2156 out_destroy_btree_cur_cache:
2157	xfs_btree_destroy_cur_caches();
2158 out_destroy_log_ticket_cache:
2159	kmem_cache_destroy(xfs_log_ticket_cache);
2160 out_destroy_buf_cache:
2161	kmem_cache_destroy(xfs_buf_cache);
2162 out:
2163	return -ENOMEM;
2164}
2165
2166STATIC void
2167xfs_destroy_caches(void)
2168{
2169	/*
2170	 * Make sure all delayed RCU frees have completed before we
2171	 * destroy the caches.
2172	 */
2173	rcu_barrier();
2174	kmem_cache_destroy(xfs_iunlink_cache);
2175	kmem_cache_destroy(xfs_attri_cache);
2176	kmem_cache_destroy(xfs_attrd_cache);
2177	kmem_cache_destroy(xfs_bui_cache);
2178	kmem_cache_destroy(xfs_bud_cache);
2179	kmem_cache_destroy(xfs_cui_cache);
2180	kmem_cache_destroy(xfs_cud_cache);
2181	kmem_cache_destroy(xfs_rui_cache);
2182	kmem_cache_destroy(xfs_rud_cache);
2183	kmem_cache_destroy(xfs_icreate_cache);
2184	kmem_cache_destroy(xfs_ili_cache);
2185	kmem_cache_destroy(xfs_inode_cache);
2186	kmem_cache_destroy(xfs_efi_cache);
2187	kmem_cache_destroy(xfs_efd_cache);
2188	kmem_cache_destroy(xfs_buf_item_cache);
2189	kmem_cache_destroy(xfs_trans_cache);
2190	kmem_cache_destroy(xfs_ifork_cache);
2191	kmem_cache_destroy(xfs_da_state_cache);
2192	xfs_defer_destroy_item_caches();
2193	xfs_btree_destroy_cur_caches();
2194	kmem_cache_destroy(xfs_log_ticket_cache);
2195	kmem_cache_destroy(xfs_buf_cache);
2196}
2197
2198STATIC int __init
2199xfs_init_workqueues(void)
2200{
2201	/*
2202	 * The allocation workqueue can be used in memory reclaim situations
2203	 * (writepage path), and parallelism is only limited by the number of
2204	 * AGs in all the filesystems mounted. Hence use the default large
2205	 * max_active value for this workqueue.
2206	 */
2207	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2208			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2209	if (!xfs_alloc_wq)
2210		return -ENOMEM;
2211
2212	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2213			0);
2214	if (!xfs_discard_wq)
2215		goto out_free_alloc_wq;
2216
2217	return 0;
2218out_free_alloc_wq:
2219	destroy_workqueue(xfs_alloc_wq);
2220	return -ENOMEM;
2221}
2222
2223STATIC void
2224xfs_destroy_workqueues(void)
2225{
2226	destroy_workqueue(xfs_discard_wq);
2227	destroy_workqueue(xfs_alloc_wq);
2228}
2229
2230#ifdef CONFIG_HOTPLUG_CPU
2231static int
2232xfs_cpu_dead(
2233	unsigned int		cpu)
2234{
2235	struct xfs_mount	*mp, *n;
2236
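	/*
	 * Walk every mounted filesystem and let it drain the departing CPU's
	 * per-cpu inodegc and CIL state.  The list lock is dropped around the
	 * callbacks because they may block.
	 */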
2237	spin_lock(&xfs_mount_list_lock);
2238	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2239		spin_unlock(&xfs_mount_list_lock);
2240		xfs_inodegc_cpu_dead(mp, cpu);
2241		xlog_cil_pcp_dead(mp->m_log, cpu);
2242		spin_lock(&xfs_mount_list_lock);
2243	}
2244	spin_unlock(&xfs_mount_list_lock);
2245	return 0;
2246}
2247
2248static int __init
2249xfs_cpu_hotplug_init(void)
2250{
2251	int	error;
2252
2253	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2254			xfs_cpu_dead);
2255	if (error < 0)
2256		xfs_alert(NULL,
2257"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2258			error);
2259	return error;
2260}
2261
2262static void
2263xfs_cpu_hotplug_destroy(void)
2264{
2265	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2266}
2267
2268#else /* !CONFIG_HOTPLUG_CPU */
2269static inline int xfs_cpu_hotplug_init(void) { return 0; }
2270static inline void xfs_cpu_hotplug_destroy(void) {}
2271#endif
2272
2273STATIC int __init
2274init_xfs_fs(void)
2275{
2276	int			error;
2277
2278	xfs_check_ondisk_structs();
2279
2280	printk(KERN_INFO XFS_VERSION_STRING " with "
2281			 XFS_BUILD_OPTIONS " enabled\n");
2282
2283	xfs_dir_startup();
2284
2285	error = xfs_cpu_hotplug_init();
2286	if (error)
2287		goto out;
2288
2289	error = xfs_init_caches();
2290	if (error)
2291		goto out_destroy_hp;
2292
2293	error = xfs_init_workqueues();
2294	if (error)
2295		goto out_destroy_caches;
2296
2297	error = xfs_mru_cache_init();
2298	if (error)
2299		goto out_destroy_wq;
2300
2301	error = xfs_init_procfs();
2302	if (error)
2303		goto out_mru_cache_uninit;
2304
2305	error = xfs_sysctl_register();
2306	if (error)
2307		goto out_cleanup_procfs;
2308
2309	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2310	if (!xfs_kset) {
2311		error = -ENOMEM;
2312		goto out_sysctl_unregister;
2313	}
2314
2315	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2316
2317	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2318	if (!xfsstats.xs_stats) {
2319		error = -ENOMEM;
2320		goto out_kset_unregister;
2321	}
2322
2323	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2324			       "stats");
2325	if (error)
2326		goto out_free_stats;
2327
2328#ifdef DEBUG
2329	xfs_dbg_kobj.kobject.kset = xfs_kset;
2330	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2331	if (error)
2332		goto out_remove_stats_kobj;
2333#endif
2334
2335	error = xfs_qm_init();
2336	if (error)
2337		goto out_remove_dbg_kobj;
2338
2339	error = register_filesystem(&xfs_fs_type);
2340	if (error)
2341		goto out_qm_exit;
2342	return 0;
2343
2344 out_qm_exit:
2345	xfs_qm_exit();
2346 out_remove_dbg_kobj:
2347#ifdef DEBUG
2348	xfs_sysfs_del(&xfs_dbg_kobj);
2349 out_remove_stats_kobj:
2350#endif
2351	xfs_sysfs_del(&xfsstats.xs_kobj);
2352 out_free_stats:
2353	free_percpu(xfsstats.xs_stats);
2354 out_kset_unregister:
2355	kset_unregister(xfs_kset);
2356 out_sysctl_unregister:
2357	xfs_sysctl_unregister();
2358 out_cleanup_procfs:
2359	xfs_cleanup_procfs();
2360 out_mru_cache_uninit:
2361	xfs_mru_cache_uninit();
2362 out_destroy_wq:
2363	xfs_destroy_workqueues();
2364 out_destroy_caches:
2365	xfs_destroy_caches();
2366 out_destroy_hp:
2367	xfs_cpu_hotplug_destroy();
2368 out:
2369	return error;
2370}
2371
2372STATIC void __exit
2373exit_xfs_fs(void)
2374{
2375	xfs_qm_exit();
2376	unregister_filesystem(&xfs_fs_type);
2377#ifdef DEBUG
2378	xfs_sysfs_del(&xfs_dbg_kobj);
2379#endif
2380	xfs_sysfs_del(&xfsstats.xs_kobj);
2381	free_percpu(xfsstats.xs_stats);
2382	kset_unregister(xfs_kset);
2383	xfs_sysctl_unregister();
2384	xfs_cleanup_procfs();
2385	xfs_mru_cache_uninit();
2386	xfs_destroy_workqueues();
2387	xfs_destroy_caches();
2388	xfs_uuid_table_free();
2389	xfs_cpu_hotplug_destroy();
2390}
2391
2392module_init(init_xfs_fs);
2393module_exit(exit_xfs_fs);
2394
2395MODULE_AUTHOR("Silicon Graphics, Inc.");
2396MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2397MODULE_LICENSE("GPL");