/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/namei.h>
#include <linux/dax.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
struct bio_set *xfs_ioend_bioset;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_mtpt, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_barrier, Opt_nobarrier,
	Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep,
	Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams,
	Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
};

static const match_table_t tokens = {
	{Opt_logbufs,	"logbufs=%u"},	/* number of XFS log buffers */
	{Opt_logbsize,	"logbsize=%s"},	/* size of XFS log buffers */
	{Opt_logdev,	"logdev=%s"},	/* log device */
	{Opt_rtdev,	"rtdev=%s"},	/* realtime I/O device */
	{Opt_biosize,	"biosize=%u"},	/* log2 of preferred buffered io size */
	{Opt_wsync,	"wsync"},	/* safe-mode nfs compatible mount */
	{Opt_noalign,	"noalign"},	/* turn off stripe alignment */
	{Opt_swalloc,	"swalloc"},	/* turn on stripe width allocation */
	{Opt_sunit,	"sunit=%u"},	/* data volume stripe unit */
	{Opt_swidth,	"swidth=%u"},	/* data volume stripe width */
	{Opt_nouuid,	"nouuid"},	/* ignore filesystem UUID */
	{Opt_mtpt,	"mtpt"},	/* filesystem mount point */
	{Opt_grpid,	"grpid"},	/* group-ID from parent directory */
	{Opt_nogrpid,	"nogrpid"},	/* group-ID from current process */
	{Opt_bsdgroups,	"bsdgroups"},	/* group-ID from parent directory */
	{Opt_sysvgroups,"sysvgroups"},	/* group-ID from current process */
	{Opt_allocsize,	"allocsize=%s"},/* preferred allocation size */
	{Opt_norecovery,"norecovery"},	/* don't run XFS recovery */
	{Opt_inode64,	"inode64"},	/* inodes can be allocated anywhere */
	{Opt_inode32,   "inode32"},	/* inode allocation limited to
					 * XFS_MAXINUMBER_32 */
	{Opt_ikeep,	"ikeep"},	/* do not free empty inode clusters */
	{Opt_noikeep,	"noikeep"},	/* free empty inode clusters */
	{Opt_largeio,	"largeio"},	/* report large I/O sizes in stat() */
	{Opt_nolargeio,	"nolargeio"},	/* do not report large I/O sizes
					 * in stat(). */
	{Opt_attr2,	"attr2"},	/* do use attr2 attribute format */
	{Opt_noattr2,	"noattr2"},	/* do not use attr2 attribute format */
	{Opt_filestreams,"filestreams"},/* use filestreams allocator */
	{Opt_quota,	"quota"},	/* disk quotas (user) */
	{Opt_noquota,	"noquota"},	/* no quotas */
	{Opt_usrquota,	"usrquota"},	/* user quota enabled */
	{Opt_grpquota,	"grpquota"},	/* group quota enabled */
	{Opt_prjquota,	"prjquota"},	/* project quota enabled */
	{Opt_uquota,	"uquota"},	/* user quota (IRIX variant) */
	{Opt_gquota,	"gquota"},	/* group quota (IRIX variant) */
	{Opt_pquota,	"pquota"},	/* project quota (IRIX variant) */
	{Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
	{Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
	{Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
	{Opt_qnoenforce, "qnoenforce"},	/* same as uqnoenforce */
	{Opt_discard,	"discard"},	/* Discard unused blocks */
	{Opt_nodiscard,	"nodiscard"},	/* Do not discard unused blocks */

	{Opt_dax,	"dax"},		/* Enable direct access to bdev pages */

	/* Deprecated mount options scheduled for removal */
	{Opt_barrier,	"barrier"},	/* use writer barriers for log write and
					 * unwritten extent conversion */
	{Opt_nobarrier,	"nobarrier"},	/* .. disable */

	{Opt_err,	NULL},
};
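
/*
 * Illustrative example (editor's note, not from the original source): a
 * mount invocation such as
 *	mount -t xfs -o logbufs=8,logbsize=256k,inode64,prjquota /dev/sdb1 /mnt
 * hands "logbufs=8,logbsize=256k,inode64,prjquota" to xfs_parseargs() below,
 * which splits the string on commas and matches each token against this table
 * via match_token().
 */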


STATIC int
suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
{
	int	last, shift_left_factor = 0, _res;
	char	*value;
	int	ret = 0;

	value = match_strdup(s);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
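
/*
 * Worked example (illustrative): for a "logbsize=64k" token the function is
 * handed "64k", strips the trailing 'k' (shift_left_factor = 10), parses 64
 * in base 10, and stores 64 << 10 = 65536 in *res.
 */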

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 *
 * *sb is const because this is also used to test options on the remount
 * path, and we don't want this to have any side effects at remount time.
 * Today this function does not change *sb, but just to future-proof...
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	const struct super_block *sb = mp->m_super;
	char			*p;
	substring_t		args[MAX_OPT_ARGS];
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	uint8_t			iosizelog = 0;

	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return -ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb_rdonly(sb))
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((p = strsep(&options, ",")) != NULL) {
		int		token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_logbufs:
			if (match_int(args, &mp->m_logbufs))
				return -EINVAL;
			break;
		case Opt_logbsize:
			if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
				return -EINVAL;
			break;
		case Opt_logdev:
			kfree(mp->m_logname);
			mp->m_logname = match_strdup(args);
			if (!mp->m_logname)
				return -ENOMEM;
			break;
		case Opt_mtpt:
			xfs_warn(mp, "%s option not allowed on this system", p);
			return -EINVAL;
		case Opt_rtdev:
			kfree(mp->m_rtname);
			mp->m_rtname = match_strdup(args);
			if (!mp->m_rtname)
				return -ENOMEM;
			break;
		case Opt_allocsize:
		case Opt_biosize:
			if (suffix_kstrtoint(args, 10, &iosize))
				return -EINVAL;
			iosizelog = ffs(iosize) - 1;
			break;
		case Opt_grpid:
		case Opt_bsdgroups:
			mp->m_flags |= XFS_MOUNT_GRPID;
			break;
		case Opt_nogrpid:
		case Opt_sysvgroups:
			mp->m_flags &= ~XFS_MOUNT_GRPID;
			break;
		case Opt_wsync:
			mp->m_flags |= XFS_MOUNT_WSYNC;
			break;
		case Opt_norecovery:
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
			break;
		case Opt_noalign:
			mp->m_flags |= XFS_MOUNT_NOALIGN;
			break;
		case Opt_swalloc:
			mp->m_flags |= XFS_MOUNT_SWALLOC;
			break;
		case Opt_sunit:
			if (match_int(args, &dsunit))
				return -EINVAL;
			break;
		case Opt_swidth:
			if (match_int(args, &dswidth))
				return -EINVAL;
			break;
		case Opt_inode32:
			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
			break;
		case Opt_inode64:
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			break;
		case Opt_nouuid:
			mp->m_flags |= XFS_MOUNT_NOUUID;
			break;
		case Opt_ikeep:
			mp->m_flags |= XFS_MOUNT_IKEEP;
			break;
		case Opt_noikeep:
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
			break;
		case Opt_largeio:
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
			break;
		case Opt_nolargeio:
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
			break;
		case Opt_attr2:
			mp->m_flags |= XFS_MOUNT_ATTR2;
			break;
		case Opt_noattr2:
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
			break;
		case Opt_filestreams:
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
			break;
		case Opt_noquota:
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
			break;
		case Opt_quota:
		case Opt_uquota:
		case Opt_usrquota:
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
			break;
		case Opt_qnoenforce:
		case Opt_uqnoenforce:
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
			break;
		case Opt_pquota:
		case Opt_prjquota:
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_PQUOTA_ENFD);
			break;
		case Opt_pqnoenforce:
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
			break;
		case Opt_gquota:
		case Opt_grpquota:
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_GQUOTA_ENFD);
			break;
		case Opt_gqnoenforce:
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_GQUOTA_ENFD;
			break;
		case Opt_discard:
			mp->m_flags |= XFS_MOUNT_DISCARD;
			break;
		case Opt_nodiscard:
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
			break;
#ifdef CONFIG_FS_DAX
		case Opt_dax:
			mp->m_flags |= XFS_MOUNT_DAX;
			break;
#endif
		case Opt_barrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			xfs_warn(mp, "unknown mount option [%s].", p);
			return -EINVAL;
		}
	}

	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}
#endif

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return -EINVAL;
	}

done:
	if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		mp->m_dalign = dsunit;
		mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return -EINVAL;
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
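
/*
 * Example of the net effect (illustrative): parsing "wsync,sunit=8,swidth=64"
 * sets XFS_MOUNT_WSYNC, then stores the raw sunit/swidth values into
 * mp->m_dalign and mp->m_swidth; as the comment at the done: label notes,
 * these are converted to FSBs later in the mount path once the superblock
 * (and hence the block size) is known.
 */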

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_SMALL_INUMS,	",inode32" },
		{ XFS_MOUNT_DAX,		",dax" },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	",largeio" },
		{ XFS_MOUNT_BARRIER,		",nobarrier" },
		{ XFS_MOUNT_SMALL_INUMS,	",inode64" },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, ",allocsize=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
static uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
# endif
#endif

	return (((uint64_t)pagefactor) << bitshift) - 1;
}
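
/*
 * Worked example (illustrative): on a 64-bit kernel neither #if branch is
 * taken, so pagefactor stays 1 and bitshift stays BITS_PER_LONG - 1 = 63,
 * and the function returns 2^63 - 1, the maximum signed 64-bit file offset.
 */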

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (mp->m_maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino =	XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
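
/*
 * Illustrative example: on an inode32 mount of a filesystem large enough
 * that the last possible inode number exceeds XFS_MAXINUMBER_32, the loop
 * above marks only the low AGs (those whose inode numbers still fit in
 * 32 bits) as pagi_inodeok, and the function returns that reduced AG count
 * rather than agcount.
 */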

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_data_workqueue)
		goto out_destroy_buf;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_data_iodone_queue;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
			mp->m_fsname);
	if (!mp->m_log_workqueue)
		goto out_destroy_reclaim;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_log;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_fsname);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_log:
	destroy_workqueue(mp->m_log_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_data_iodone_queue:
	destroy_workqueue(mp->m_data_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
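
/*
 * Each alloc_workqueue() failure above jumps to a label that tears down only
 * the workqueues created so far, in reverse order of creation -- the usual
 * kernel goto-unwind error-handling pattern.
 */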

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_log_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_data_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
	statp->f_bavail = statp->f_bfree;

	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files = MIN(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);


	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
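
/*
 * Worked example (illustrative): with 1000 free blocks and sb_inopblog = 4
 * (16 inodes per block), fakeinos above is 1000 << 4 = 16000 -- an upper
 * bound on how many more inodes could still be allocated from free space,
 * which is then capped by XFS_MAXINUMBER and mp->m_maxicount.
 */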

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to their location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}

STATIC int
xfs_test_remount_options(
	struct super_block	*sb,
	char			*options)
{
	int			error = 0;
	struct xfs_mount	*tmp_mp;

	tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
	if (!tmp_mp)
		return -ENOMEM;

	tmp_mp->m_super = sb;
	error = xfs_parseargs(tmp_mp, options);
	xfs_free_fsname(tmp_mp);
	kmem_free(tmp_mp);

	return error;
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	/* First, check for complete junk; i.e. invalid options */
	error = xfs_test_remount_options(sb, options);
	if (error)
		return error;

	sync_filesystem(sb);
	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		case Opt_inode64:
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
			break;
		case Opt_inode32:
			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
			xfs_warn(mp,
		"ro->rw transition prohibited on norecovery mount");
			return -EINVAL;
		}

		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_ro_compat_feature(sbp,
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
			xfs_warn(mp,
"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
				(sbp->sb_features_ro_compat &
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
			return -EINVAL;
		}

		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_sb) {
			error = xfs_sync_sb(mp, false);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_sb = false;
		}

		/*
		 * Fill out the reserve pool if it is empty. Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
		xfs_log_work_queue(mp);
		xfs_queue_eofblocks(mp);

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
		xfs_queue_cowblocks(mp);

		/* Create the per-AG metadata reservation pool. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
		/* Get rid of any leftover CoW reservations... */
		cancel_delayed_work_sync(&mp->m_cowblocks_work);
		error = xfs_icache_free_cowblocks(mp, NULL);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}

		/* Free the per-AG metadata reservation pool. */
		error = xfs_fs_unreserve_ag_blocks(mp);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}

		/*
		 * Before we sync the metadata, we need to free up the reserve
		 * block pool so that the used block count in the superblock on
		 * disk is correct at the end of the remount. Stash the current
		 * reserve pool size so that if we get remounted rw, we can
		 * return it to the same size.
		 */
		xfs_save_resvblks(mp);

		/*
		 * Cancel background eofb scanning so it cannot race with the
		 * final log force+buftarg wait and deadlock the remount.
		 */
		cancel_delayed_work_sync(&mp->m_eofblocks_work);

		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	return 0;
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	return xfs_showargs(XFS_M(root->d_sb), m);
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	return 0;

free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
}

static struct xfs_mount *
xfs_mount_alloc(
	struct super_block	*sb)
{
	struct xfs_mount	*mp;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		return NULL;

	mp->m_super = sb;
	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	return mp;
}


STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = -ENOMEM;

	/*
	 * allocate mp and do all low-level struct initializations before we
	 * attach it to the super
	 */
	mp = xfs_mount_alloc(sb);
	if (!mp)
		goto out;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		error = bdev_dax_supported(sb, sb->s_blocksize);
		if (error) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reflink not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}

STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
1835
1836static struct file_system_type xfs_fs_type = {
1837	.owner			= THIS_MODULE,
1838	.name			= "xfs",
1839	.mount			= xfs_fs_mount,
1840	.kill_sb		= kill_block_super,
1841	.fs_flags		= FS_REQUIRES_DEV,
1842};
1843MODULE_ALIAS_FS("xfs");
1844
1845STATIC int __init
1846xfs_init_zones(void)
1847{
1848	xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
1849			offsetof(struct xfs_ioend, io_inline_bio),
1850			BIOSET_NEED_BVECS);
1851	if (!xfs_ioend_bioset)
1852		goto out;
1853
1854	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1855						"xfs_log_ticket");
1856	if (!xfs_log_ticket_zone)
1857		goto out_free_ioend_bioset;
1858
1859	xfs_bmap_free_item_zone = kmem_zone_init(
1860			sizeof(struct xfs_extent_free_item),
1861			"xfs_bmap_free_item");
1862	if (!xfs_bmap_free_item_zone)
1863		goto out_destroy_log_ticket_zone;
1864
1865	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1866						"xfs_btree_cur");
1867	if (!xfs_btree_cur_zone)
1868		goto out_destroy_bmap_free_item_zone;
1869
1870	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1871						"xfs_da_state");
1872	if (!xfs_da_state_zone)
1873		goto out_destroy_btree_cur_zone;
1874
1875	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1876	if (!xfs_ifork_zone)
1877		goto out_destroy_da_state_zone;
1878
1879	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1880	if (!xfs_trans_zone)
1881		goto out_destroy_ifork_zone;
1882
1883	xfs_log_item_desc_zone =
1884		kmem_zone_init(sizeof(struct xfs_log_item_desc),
1885			       "xfs_log_item_desc");
1886	if (!xfs_log_item_desc_zone)
1887		goto out_destroy_trans_zone;
1888
1889	/*
1890	 * The size of the zone allocated buf log item is the maximum
1891	 * size possible under XFS.  This wastes a little bit of memory,
1892	 * but it is much faster.
1893	 */
1894	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
1895					   "xfs_buf_item");
1896	if (!xfs_buf_item_zone)
1897		goto out_destroy_log_item_desc_zone;
1898
1899	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1900			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1901				 sizeof(xfs_extent_t))), "xfs_efd_item");
1902	if (!xfs_efd_zone)
1903		goto out_destroy_buf_item_zone;
1904
1905	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1906			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1907				sizeof(xfs_extent_t))), "xfs_efi_item");
1908	if (!xfs_efi_zone)
1909		goto out_destroy_efd_zone;
1910
1911	xfs_inode_zone =
1912		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1913			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
1914			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
1915	if (!xfs_inode_zone)
1916		goto out_destroy_efi_zone;
1917
1918	xfs_ili_zone =
1919		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1920					KM_ZONE_SPREAD, NULL);
1921	if (!xfs_ili_zone)
1922		goto out_destroy_inode_zone;
1923	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
1924					"xfs_icr");
1925	if (!xfs_icreate_zone)
1926		goto out_destroy_ili_zone;
1927
1928	xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
1929			"xfs_rud_item");
1930	if (!xfs_rud_zone)
1931		goto out_destroy_icreate_zone;
1932
1933	xfs_rui_zone = kmem_zone_init(
1934			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1935			"xfs_rui_item");
1936	if (!xfs_rui_zone)
1937		goto out_destroy_rud_zone;
1938
1939	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
1940			"xfs_cud_item");
1941	if (!xfs_cud_zone)
1942		goto out_destroy_rui_zone;
1943
1944	xfs_cui_zone = kmem_zone_init(
1945			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1946			"xfs_cui_item");
1947	if (!xfs_cui_zone)
1948		goto out_destroy_cud_zone;
1949
1950	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
1951			"xfs_bud_item");
1952	if (!xfs_bud_zone)
1953		goto out_destroy_cui_zone;
1954
1955	xfs_bui_zone = kmem_zone_init(
1956			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1957			"xfs_bui_item");
1958	if (!xfs_bui_zone)
1959		goto out_destroy_bud_zone;
1960
1961	return 0;
1962
1963 out_destroy_bud_zone:
1964	kmem_zone_destroy(xfs_bud_zone);
1965 out_destroy_cui_zone:
1966	kmem_zone_destroy(xfs_cui_zone);
1967 out_destroy_cud_zone:
1968	kmem_zone_destroy(xfs_cud_zone);
1969 out_destroy_rui_zone:
1970	kmem_zone_destroy(xfs_rui_zone);
1971 out_destroy_rud_zone:
1972	kmem_zone_destroy(xfs_rud_zone);
1973 out_destroy_icreate_zone:
1974	kmem_zone_destroy(xfs_icreate_zone);
1975 out_destroy_ili_zone:
1976	kmem_zone_destroy(xfs_ili_zone);
1977 out_destroy_inode_zone:
1978	kmem_zone_destroy(xfs_inode_zone);
1979 out_destroy_efi_zone:
1980	kmem_zone_destroy(xfs_efi_zone);
1981 out_destroy_efd_zone:
1982	kmem_zone_destroy(xfs_efd_zone);
1983 out_destroy_buf_item_zone:
1984	kmem_zone_destroy(xfs_buf_item_zone);
1985 out_destroy_log_item_desc_zone:
1986	kmem_zone_destroy(xfs_log_item_desc_zone);
1987 out_destroy_trans_zone:
1988	kmem_zone_destroy(xfs_trans_zone);
1989 out_destroy_ifork_zone:
1990	kmem_zone_destroy(xfs_ifork_zone);
1991 out_destroy_da_state_zone:
1992	kmem_zone_destroy(xfs_da_state_zone);
1993 out_destroy_btree_cur_zone:
1994	kmem_zone_destroy(xfs_btree_cur_zone);
1995 out_destroy_bmap_free_item_zone:
1996	kmem_zone_destroy(xfs_bmap_free_item_zone);
1997 out_destroy_log_ticket_zone:
1998	kmem_zone_destroy(xfs_log_ticket_zone);
1999 out_free_ioend_bioset:
2000	bioset_free(xfs_ioend_bioset);
2001 out:
2002	return -ENOMEM;
2003}
2004
2005STATIC void
2006xfs_destroy_zones(void)
2007{
2008	/*
2009	 * Make sure all delayed rcu free are flushed before we
2010	 * destroy caches.
2011	 */
2012	rcu_barrier();
2013	kmem_zone_destroy(xfs_bui_zone);
2014	kmem_zone_destroy(xfs_bud_zone);
2015	kmem_zone_destroy(xfs_cui_zone);
2016	kmem_zone_destroy(xfs_cud_zone);
2017	kmem_zone_destroy(xfs_rui_zone);
2018	kmem_zone_destroy(xfs_rud_zone);
2019	kmem_zone_destroy(xfs_icreate_zone);
2020	kmem_zone_destroy(xfs_ili_zone);
2021	kmem_zone_destroy(xfs_inode_zone);
2022	kmem_zone_destroy(xfs_efi_zone);
2023	kmem_zone_destroy(xfs_efd_zone);
2024	kmem_zone_destroy(xfs_buf_item_zone);
2025	kmem_zone_destroy(xfs_log_item_desc_zone);
2026	kmem_zone_destroy(xfs_trans_zone);
2027	kmem_zone_destroy(xfs_ifork_zone);
2028	kmem_zone_destroy(xfs_da_state_zone);
2029	kmem_zone_destroy(xfs_btree_cur_zone);
2030	kmem_zone_destroy(xfs_bmap_free_item_zone);
2031	kmem_zone_destroy(xfs_log_ticket_zone);
2032	bioset_free(xfs_ioend_bioset);
2033}
2034
2035STATIC int __init
2036xfs_init_workqueues(void)
2037{
2038	/*
2039	 * The allocation workqueue can be used in memory reclaim situations
2040	 * (writepage path), and parallelism is only limited by the number of
2041	 * AGs in all the filesystems mounted. Hence use the default large
2042	 * max_active value for this workqueue.
2043	 */
2044	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2045			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2046	if (!xfs_alloc_wq)
2047		return -ENOMEM;
2048
2049	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2050	if (!xfs_discard_wq)
2051		goto out_free_alloc_wq;
2052
2053	return 0;
2054out_free_alloc_wq:
2055	destroy_workqueue(xfs_alloc_wq);
2056	return -ENOMEM;
2057}
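/*
 * Illustrative note on the flags above: WQ_MEM_RECLAIM guarantees a
 * workqueue a rescuer thread so queued work can make forward progress
 * while the system is reclaiming memory, and WQ_FREEZABLE parks it
 * across suspend.  A max_active of 0 selects the default concurrency
 * limit.  A hypothetical queue with the same properties would be
 * created as:
 *
 *	wq = alloc_workqueue("example", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 */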
2058
2059STATIC void
2060xfs_destroy_workqueues(void)
2061{
2062	destroy_workqueue(xfs_discard_wq);
2063	destroy_workqueue(xfs_alloc_wq);
2064}
2065
2066STATIC int __init
2067init_xfs_fs(void)
2068{
2069	int			error;
2070
2071	xfs_check_ondisk_structs();
2072
2073	printk(KERN_INFO XFS_VERSION_STRING " with "
2074			 XFS_BUILD_OPTIONS " enabled\n");
2075
2076	xfs_extent_free_init_defer_op();
2077	xfs_rmap_update_init_defer_op();
2078	xfs_refcount_update_init_defer_op();
2079	xfs_bmap_update_init_defer_op();
2080
2081	xfs_dir_startup();
2082
2083	error = xfs_init_zones();
2084	if (error)
2085		goto out;
2086
2087	error = xfs_init_workqueues();
2088	if (error)
2089		goto out_destroy_zones;
2090
2091	error = xfs_mru_cache_init();
2092	if (error)
2093		goto out_destroy_wq;
2094
2095	error = xfs_buf_init();
2096	if (error)
2097		goto out_mru_cache_uninit;
2098
2099	error = xfs_init_procfs();
2100	if (error)
2101		goto out_buf_terminate;
2102
2103	error = xfs_sysctl_register();
2104	if (error)
2105		goto out_cleanup_procfs;
2106
2107	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2108	if (!xfs_kset) {
2109		error = -ENOMEM;
2110		goto out_sysctl_unregister;
2111	}
2112
2113	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2114
2115	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2116	if (!xfsstats.xs_stats) {
2117		error = -ENOMEM;
2118		goto out_kset_unregister;
2119	}
2120
2121	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2122			       "stats");
2123	if (error)
2124		goto out_free_stats;
2125
2126#ifdef DEBUG
2127	xfs_dbg_kobj.kobject.kset = xfs_kset;
2128	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2129	if (error)
2130		goto out_remove_stats_kobj;
2131#endif
2132
2133	error = xfs_qm_init();
2134	if (error)
2135		goto out_remove_dbg_kobj;
2136
2137	error = register_filesystem(&xfs_fs_type);
2138	if (error)
2139		goto out_qm_exit;
2140	return 0;
2141
2142 out_qm_exit:
2143	xfs_qm_exit();
2144 out_remove_dbg_kobj:
2145#ifdef DEBUG
2146	xfs_sysfs_del(&xfs_dbg_kobj);
2147 out_remove_stats_kobj:
2148#endif
2149	xfs_sysfs_del(&xfsstats.xs_kobj);
2150 out_free_stats:
2151	free_percpu(xfsstats.xs_stats);
2152 out_kset_unregister:
2153	kset_unregister(xfs_kset);
2154 out_sysctl_unregister:
2155	xfs_sysctl_unregister();
2156 out_cleanup_procfs:
2157	xfs_cleanup_procfs();
2158 out_buf_terminate:
2159	xfs_buf_terminate();
2160 out_mru_cache_uninit:
2161	xfs_mru_cache_uninit();
2162 out_destroy_wq:
2163	xfs_destroy_workqueues();
2164 out_destroy_zones:
2165	xfs_destroy_zones();
2166 out:
2167	return error;
2168}
2169
2170STATIC void __exit
2171exit_xfs_fs(void)
2172{
2173	xfs_qm_exit();
2174	unregister_filesystem(&xfs_fs_type);
2175#ifdef DEBUG
2176	xfs_sysfs_del(&xfs_dbg_kobj);
2177#endif
2178	xfs_sysfs_del(&xfsstats.xs_kobj);
2179	free_percpu(xfsstats.xs_stats);
2180	kset_unregister(xfs_kset);
2181	xfs_sysctl_unregister();
2182	xfs_cleanup_procfs();
2183	xfs_buf_terminate();
2184	xfs_mru_cache_uninit();
2185	xfs_destroy_workqueues();
2186	xfs_destroy_zones();
2187	xfs_uuid_table_free();
2188}
2189
2190module_init(init_xfs_fs);
2191module_exit(exit_xfs_fs);
2192
2193MODULE_AUTHOR("Silicon Graphics, Inc.");
2194MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2195MODULE_LICENSE("GPL");
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_sb.h"
  13#include "xfs_mount.h"
  14#include "xfs_inode.h"
  15#include "xfs_btree.h"
  16#include "xfs_bmap.h"
  17#include "xfs_alloc.h"
  18#include "xfs_fsops.h"
  19#include "xfs_trans.h"
  20#include "xfs_buf_item.h"
  21#include "xfs_log.h"
  22#include "xfs_log_priv.h"
  23#include "xfs_dir2.h"
  24#include "xfs_extfree_item.h"
  25#include "xfs_mru_cache.h"
  26#include "xfs_inode_item.h"
  27#include "xfs_icache.h"
  28#include "xfs_trace.h"
  29#include "xfs_icreate_item.h"
  30#include "xfs_filestream.h"
  31#include "xfs_quota.h"
  32#include "xfs_sysfs.h"
  33#include "xfs_ondisk.h"
  34#include "xfs_rmap_item.h"
  35#include "xfs_refcount_item.h"
  36#include "xfs_bmap_item.h"
  37#include "xfs_reflink.h"
  38#include "xfs_pwork.h"
  39#include "xfs_ag.h"
  40#include "xfs_defer.h"
  41#include "xfs_attr_item.h"
  42#include "xfs_xattr.h"
  43#include "xfs_iunlink_item.h"
  44#include "xfs_dahash_test.h"
  45#include "xfs_rtbitmap.h"
  46#include "scrub/stats.h"
  47
  48#include <linux/magic.h>
  49#include <linux/fs_context.h>
  50#include <linux/fs_parser.h>
  51
  52static const struct super_operations xfs_super_operations;
  53
  54static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
  55static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
  56#ifdef DEBUG
  57static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
  58#endif
  59
  60enum xfs_dax_mode {
  61	XFS_DAX_INODE = 0,
  62	XFS_DAX_ALWAYS = 1,
  63	XFS_DAX_NEVER = 2,
  64};
  65
  66static void
  67xfs_mount_set_dax_mode(
  68	struct xfs_mount	*mp,
  69	enum xfs_dax_mode	mode)
  70{
  71	switch (mode) {
  72	case XFS_DAX_INODE:
  73		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
  74		break;
  75	case XFS_DAX_ALWAYS:
  76		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
  77		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
  78		break;
  79	case XFS_DAX_NEVER:
  80		mp->m_features |= XFS_FEAT_DAX_NEVER;
  81		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
  82		break;
  83	}
  84}
  85
  86static const struct constant_table dax_param_enums[] = {
  87	{"inode",	XFS_DAX_INODE },
  88	{"always",	XFS_DAX_ALWAYS },
  89	{"never",	XFS_DAX_NEVER },
  90	{}
  91};
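/*
 * Illustrative mapping for the table above: a bare "dax" behaves like
 * "dax=always" (XFS_FEAT_DAX_ALWAYS set, XFS_FEAT_DAX_NEVER cleared),
 * "dax=never" inverts both bits, and "dax=inode" clears both so the
 * per-inode FS_XFLAG_DAX attribute decides.  For example, a
 * hypothetical "mount -o dax=never /dev/pmem0 /mnt" disables DAX for
 * every inode regardless of on-disk flags.
 */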
  92
  93/*
  94 * Table driven mount option parser.
  95 */
  96enum {
  97	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
  98	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
  99	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
 100	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
 101	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
 102	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
 103	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
 104	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
 105	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
 106};
 107
 108static const struct fs_parameter_spec xfs_fs_parameters[] = {
 109	fsparam_u32("logbufs",		Opt_logbufs),
 110	fsparam_string("logbsize",	Opt_logbsize),
 111	fsparam_string("logdev",	Opt_logdev),
 112	fsparam_string("rtdev",		Opt_rtdev),
 113	fsparam_flag("wsync",		Opt_wsync),
 114	fsparam_flag("noalign",		Opt_noalign),
 115	fsparam_flag("swalloc",		Opt_swalloc),
 116	fsparam_u32("sunit",		Opt_sunit),
 117	fsparam_u32("swidth",		Opt_swidth),
 118	fsparam_flag("nouuid",		Opt_nouuid),
 119	fsparam_flag("grpid",		Opt_grpid),
 120	fsparam_flag("nogrpid",		Opt_nogrpid),
 121	fsparam_flag("bsdgroups",	Opt_bsdgroups),
 122	fsparam_flag("sysvgroups",	Opt_sysvgroups),
 123	fsparam_string("allocsize",	Opt_allocsize),
 124	fsparam_flag("norecovery",	Opt_norecovery),
 125	fsparam_flag("inode64",		Opt_inode64),
 126	fsparam_flag("inode32",		Opt_inode32),
 127	fsparam_flag("ikeep",		Opt_ikeep),
 128	fsparam_flag("noikeep",		Opt_noikeep),
 129	fsparam_flag("largeio",		Opt_largeio),
 130	fsparam_flag("nolargeio",	Opt_nolargeio),
 131	fsparam_flag("attr2",		Opt_attr2),
 132	fsparam_flag("noattr2",		Opt_noattr2),
 133	fsparam_flag("filestreams",	Opt_filestreams),
 134	fsparam_flag("quota",		Opt_quota),
 135	fsparam_flag("noquota",		Opt_noquota),
 136	fsparam_flag("usrquota",	Opt_usrquota),
 137	fsparam_flag("grpquota",	Opt_grpquota),
 138	fsparam_flag("prjquota",	Opt_prjquota),
 139	fsparam_flag("uquota",		Opt_uquota),
 140	fsparam_flag("gquota",		Opt_gquota),
 141	fsparam_flag("pquota",		Opt_pquota),
 142	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
 143	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
 144	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
 145	fsparam_flag("qnoenforce",	Opt_qnoenforce),
 146	fsparam_flag("discard",		Opt_discard),
 147	fsparam_flag("nodiscard",	Opt_nodiscard),
 148	fsparam_flag("dax",		Opt_dax),
 149	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
 150	{}
 151};
 152
 153struct proc_xfs_info {
 154	uint64_t	flag;
 155	char		*str;
 156};
 157
 158static int
 159xfs_fs_show_options(
 160	struct seq_file		*m,
 161	struct dentry		*root)
 162{
 163	static struct proc_xfs_info xfs_info_set[] = {
 164		/* the few simple ones we can get from the mount struct */
 165		{ XFS_FEAT_IKEEP,		",ikeep" },
 166		{ XFS_FEAT_WSYNC,		",wsync" },
 167		{ XFS_FEAT_NOALIGN,		",noalign" },
 168		{ XFS_FEAT_SWALLOC,		",swalloc" },
 169		{ XFS_FEAT_NOUUID,		",nouuid" },
 170		{ XFS_FEAT_NORECOVERY,		",norecovery" },
 171		{ XFS_FEAT_ATTR2,		",attr2" },
 172		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
 173		{ XFS_FEAT_GRPID,		",grpid" },
 174		{ XFS_FEAT_DISCARD,		",discard" },
 175		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
 176		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
 177		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
 178		{ 0, NULL }
 179	};
 180	struct xfs_mount	*mp = XFS_M(root->d_sb);
 181	struct proc_xfs_info	*xfs_infop;
 182
 183	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 184		if (mp->m_features & xfs_infop->flag)
 185			seq_puts(m, xfs_infop->str);
 186	}
 187
 188	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
 189
 190	if (xfs_has_allocsize(mp))
 191		seq_printf(m, ",allocsize=%dk",
 192			   (1 << mp->m_allocsize_log) >> 10);
 193
 194	if (mp->m_logbufs > 0)
 195		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
 196	if (mp->m_logbsize > 0)
 197		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
 198
 199	if (mp->m_logname)
 200		seq_show_option(m, "logdev", mp->m_logname);
 201	if (mp->m_rtname)
 202		seq_show_option(m, "rtdev", mp->m_rtname);
 203
 204	if (mp->m_dalign > 0)
 205		seq_printf(m, ",sunit=%d",
 206				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 207	if (mp->m_swidth > 0)
 208		seq_printf(m, ",swidth=%d",
 209				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 210
 211	if (mp->m_qflags & XFS_UQUOTA_ENFD)
 212		seq_puts(m, ",usrquota");
 213	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
 214		seq_puts(m, ",uqnoenforce");
 215
 216	if (mp->m_qflags & XFS_PQUOTA_ENFD)
 217		seq_puts(m, ",prjquota");
 218	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
 219		seq_puts(m, ",pqnoenforce");
 220
 221	if (mp->m_qflags & XFS_GQUOTA_ENFD)
 222		seq_puts(m, ",grpquota");
 223	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
 224		seq_puts(m, ",gqnoenforce");
 225
 226	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 227		seq_puts(m, ",noquota");
 228
 229	return 0;
 230}
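/*
 * Sketch of a resulting /proc/mounts entry for a hypothetical V5
 * filesystem mounted with enforced user quotas:
 *
 *	/dev/sda1 /mnt xfs rw,attr2,inode64,usrquota 0 0
 *
 * The loop above emits one fragment per feature bit set in m_features,
 * which is why each string in xfs_info_set[] carries a leading comma;
 * the quota and geometry options are appended conditionally afterwards.
 */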
 231
 232static bool
 233xfs_set_inode_alloc_perag(
 234	struct xfs_perag	*pag,
 235	xfs_ino_t		ino,
 236	xfs_agnumber_t		max_metadata)
 237{
 238	if (!xfs_is_inode32(pag->pag_mount)) {
 239		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 240		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 241		return false;
 242	}
 243
 244	if (ino > XFS_MAXINUMBER_32) {
 245		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 246		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 247		return false;
 248	}
 249
 250	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 251	if (pag->pag_agno < max_metadata)
 252		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 253	else
 254		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 255	return true;
 256}
 257
 258/*
 259 * Set parameters for inode allocation heuristics, taking into account
 260 * filesystem size and inode32/inode64 mount options; i.e. specifically
 261 * whether or not XFS_FEAT_SMALL_INUMS is set.
 262 *
 263 * Inode allocation patterns are altered only if inode32 is requested
 264 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 265 * If altered, XFS_OPSTATE_INODE32 is set as well.
 266 *
 267 * An agcount independent of that in the mount structure is provided
 268 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 269 * to the potentially higher ag count.
 270 *
 271 * Returns the maximum AG index which may contain inodes.
 272 */
 273xfs_agnumber_t
 274xfs_set_inode_alloc(
 275	struct xfs_mount *mp,
 276	xfs_agnumber_t	agcount)
 277{
 278	xfs_agnumber_t	index;
 279	xfs_agnumber_t	maxagi = 0;
 280	xfs_sb_t	*sbp = &mp->m_sb;
 281	xfs_agnumber_t	max_metadata;
 282	xfs_agino_t	agino;
 283	xfs_ino_t	ino;
 284
 285	/*
 286	 * Calculate how much should be reserved for inodes to meet
 287	 * the max inode percentage.  Used only for inode32.
 288	 */
 289	if (M_IGEO(mp)->maxicount) {
 290		uint64_t	icount;
 291
 292		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 293		do_div(icount, 100);
 294		icount += sbp->sb_agblocks - 1;
 295		do_div(icount, sbp->sb_agblocks);
 296		max_metadata = icount;
 297	} else {
 298		max_metadata = agcount;
 299	}
 300
 301	/* Get the last possible inode in the filesystem */
 302	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
 303	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 304
 305	/*
 306	 * If user asked for no more than 32-bit inodes, and the fs is
 307	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
 308	 * the allocator to accommodate the request.
 309	 */
 310	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
 311		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
 312	else
 313		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
 314
 315	for (index = 0; index < agcount; index++) {
 316		struct xfs_perag	*pag;
 317
 318		ino = XFS_AGINO_TO_INO(mp, index, agino);
 319
 320		pag = xfs_perag_get(mp, index);
 321		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
 322			maxagi++;
 323		xfs_perag_put(pag);
 324	}
 325
 326	return xfs_is_inode32(mp) ? maxagi : agcount;
 327}
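/*
 * Worked example with hypothetical geometry: on a filesystem large
 * enough that the last possible inode number exceeds XFS_MAXINUMBER_32,
 * the default inode64 behaviour changes nothing and every AG may hold
 * inodes.  With inode32, XFS_OPSTATE_INODE32 is set and the perag loop
 * above permits inode allocation only in the low AGs whose
 * XFS_AGINO_TO_INO() results still fit in 32 bits, so the function
 * returns that reduced maxagi rather than agcount.
 */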
 328
 329static int
 330xfs_setup_dax_always(
 331	struct xfs_mount	*mp)
 332{
 333	if (!mp->m_ddev_targp->bt_daxdev &&
 334	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
 335		xfs_alert(mp,
 336			"DAX unsupported by block device. Turning off DAX.");
 337		goto disable_dax;
 338	}
 339
 340	if (mp->m_super->s_blocksize != PAGE_SIZE) {
 341		xfs_alert(mp,
 342			"DAX not supported for blocksize. Turning off DAX.");
 343		goto disable_dax;
 344	}
 345
 346	if (xfs_has_reflink(mp) &&
 347	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
 348		xfs_alert(mp,
 349			"DAX and reflink cannot work with multi-partitions!");
 350		return -EINVAL;
 351	}
 352
 353	return 0;
 354
 355disable_dax:
 356	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
 357	return 0;
 358}
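/*
 * Decision summary, illustrative only: dax=always survives when at
 * least one buffer target sits on a DAX-capable device and the
 * filesystem block size equals PAGE_SIZE; otherwise the mode is quietly
 * downgraded to dax=never and the mount proceeds.  The one hard failure
 * is reflink on a partitioned DAX device, which returns -EINVAL.
 */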
 359
 360STATIC int
 361xfs_blkdev_get(
 362	xfs_mount_t		*mp,
 363	const char		*name,
 364	struct bdev_handle	**handlep)
 365{
 366	int			error = 0;
 367
 368	*handlep = bdev_open_by_path(name,
 369		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
 370		mp->m_super, &fs_holder_ops);
 371	if (IS_ERR(*handlep)) {
 372		error = PTR_ERR(*handlep);
 373		*handlep = NULL;
 374		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 375	}
 376
 377	return error;
 378}
 379
 380STATIC void
 381xfs_shutdown_devices(
 382	struct xfs_mount	*mp)
 383{
 384	/*
 385	 * Udev is triggered whenever anyone closes a block device or unmounts
 386	 * a file system on a block device.
 387	 * The default udev rules invoke blkid to read the fs super and create
 388	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
 389	 * reads through the page cache.
 390	 *
 391	 * xfs_db also uses buffered reads to examine metadata.  There is no
 392	 * coordination between xfs_db and udev, which means that they can run
 393	 * concurrently.  Note there is no coordination between the kernel and
 394	 * blkid either.
 395	 *
 396	 * On a system with 64k pages, the page cache can cache the superblock
 397	 * and the root inode (and hence the root directory) with the same 64k
 398	 * page.  If udev spawns blkid after the mkfs and the system is busy
 399	 * enough that it is still running when xfs_db starts up, they'll both
 400	 * read from the same page in the pagecache.
 401	 *
 402	 * The unmount writes updated inode metadata to disk directly.  The XFS
 403	 * buffer cache does not use the bdev pagecache, so it needs to
 404	 * invalidate that pagecache on unmount.  If the above scenario occurs,
 405	 * the pagecache no longer reflects what's on disk, xfs_db reads the
 406	 * stale metadata, and fails to find /a.  Most of the time this succeeds
 407	 * because closing a bdev invalidates the page cache, but when processes
 408	 * race, everyone loses.
 409	 */
 410	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 411		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
 412		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
 413	}
 414	if (mp->m_rtdev_targp) {
 415		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
 416		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
 417	}
 418	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 419	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
 420}
 421
 422/*
 423 * The file system configurations are:
 424 *	(1) device (partition) with data and internal log
 425 *	(2) logical volume with data and log subvolumes.
 426 *	(3) logical volume with data, log, and realtime subvolumes.
 427 *
 428 * We only have to handle opening the log and realtime volumes here if
 429 * they are present.  The data subvolume has already been opened by
 430 * get_sb_bdev() and is stored in sb->s_bdev.
 431 */
 432STATIC int
 433xfs_open_devices(
 434	struct xfs_mount	*mp)
 435{
 436	struct super_block	*sb = mp->m_super;
 437	struct block_device	*ddev = sb->s_bdev;
 438	struct bdev_handle	*logdev_handle = NULL, *rtdev_handle = NULL;
 439	int			error;
 440
 441	/*
 442	 * Open real time and log devices - order is important.
 443	 */
 444	if (mp->m_logname) {
 445		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
 446		if (error)
 447			return error;
 448	}
 449
 450	if (mp->m_rtname) {
 451		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
 452		if (error)
 453			goto out_close_logdev;
 454
 455		if (rtdev_handle->bdev == ddev ||
 456		    (logdev_handle &&
 457		     rtdev_handle->bdev == logdev_handle->bdev)) {
 458			xfs_warn(mp,
 459	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 460			error = -EINVAL;
 461			goto out_close_rtdev;
 462		}
 463	}
 464
 465	/*
 466	 * Setup xfs_mount buffer target pointers
 467	 */
 468	error = -ENOMEM;
 469	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
 470	if (!mp->m_ddev_targp)
 471		goto out_close_rtdev;
 472
 473	if (rtdev_handle) {
 474		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
 475		if (!mp->m_rtdev_targp)
 476			goto out_free_ddev_targ;
 477	}
 478
 479	if (logdev_handle && logdev_handle->bdev != ddev) {
 480		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
 481		if (!mp->m_logdev_targp)
 482			goto out_free_rtdev_targ;
 483	} else {
 484		mp->m_logdev_targp = mp->m_ddev_targp;
 485		/* Handle won't be used, drop it */
 486		if (logdev_handle)
 487			bdev_release(logdev_handle);
 488	}
 489
 490	return 0;
 491
 492 out_free_rtdev_targ:
 493	if (mp->m_rtdev_targp)
 494		xfs_free_buftarg(mp->m_rtdev_targp);
 495 out_free_ddev_targ:
 496	xfs_free_buftarg(mp->m_ddev_targp);
 497 out_close_rtdev:
 498	if (rtdev_handle)
 499		bdev_release(rtdev_handle);
 500 out_close_logdev:
 501	if (logdev_handle)
 502		bdev_release(logdev_handle);
 503	return error;
 504}
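/*
 * Example invocation for configuration (3) above, with hypothetical
 * device names: mount -o logdev=/dev/sdb1,rtdev=/dev/sdc1 /dev/sda1
 * /mnt.  The data device arrives via sb->s_bdev_handle; the log and
 * realtime volumes are opened here with BLK_OPEN_RESTRICT_WRITES so
 * that no other writer can open them while the filesystem is mounted.
 */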
 505
 506/*
 507 * Setup xfs_mount buffer target pointers based on superblock
 508 */
 509STATIC int
 510xfs_setup_devices(
 511	struct xfs_mount	*mp)
 512{
 513	int			error;
 514
 515	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 516	if (error)
 517		return error;
 518
 519	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 520		unsigned int	log_sector_size = BBSIZE;
 521
 522		if (xfs_has_sector(mp))
 523			log_sector_size = mp->m_sb.sb_logsectsize;
 524		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 525					    log_sector_size);
 526		if (error)
 527			return error;
 528	}
 529	if (mp->m_rtdev_targp) {
 530		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 531					    mp->m_sb.sb_sectsize);
 532		if (error)
 533			return error;
 534	}
 535
 536	return 0;
 537}
 538
 539STATIC int
 540xfs_init_mount_workqueues(
 541	struct xfs_mount	*mp)
 542{
 543	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
 544			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 545			1, mp->m_super->s_id);
 546	if (!mp->m_buf_workqueue)
 547		goto out;
 548
 549	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
 550			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 551			0, mp->m_super->s_id);
 552	if (!mp->m_unwritten_workqueue)
 553		goto out_destroy_buf;
 554
 555	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
 556			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 557			0, mp->m_super->s_id);
 558	if (!mp->m_reclaim_workqueue)
 559		goto out_destroy_unwritten;
 560
 561	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
 562			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
 563			0, mp->m_super->s_id);
 564	if (!mp->m_blockgc_wq)
 565		goto out_destroy_reclaim;
 566
 567	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
 568			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 569			1, mp->m_super->s_id);
 570	if (!mp->m_inodegc_wq)
 571		goto out_destroy_blockgc;
 572
 573	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
 574			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
 575	if (!mp->m_sync_workqueue)
 576		goto out_destroy_inodegc;
 577
 578	return 0;
 579
 580out_destroy_inodegc:
 581	destroy_workqueue(mp->m_inodegc_wq);
 582out_destroy_blockgc:
 583	destroy_workqueue(mp->m_blockgc_wq);
 584out_destroy_reclaim:
 585	destroy_workqueue(mp->m_reclaim_workqueue);
 586out_destroy_unwritten:
 587	destroy_workqueue(mp->m_unwritten_workqueue);
 588out_destroy_buf:
 589	destroy_workqueue(mp->m_buf_workqueue);
 590out:
 591	return -ENOMEM;
 592}
 593
 594STATIC void
 595xfs_destroy_mount_workqueues(
 596	struct xfs_mount	*mp)
 597{
 598	destroy_workqueue(mp->m_sync_workqueue);
 599	destroy_workqueue(mp->m_blockgc_wq);
 600	destroy_workqueue(mp->m_inodegc_wq);
 601	destroy_workqueue(mp->m_reclaim_workqueue);
 602	destroy_workqueue(mp->m_unwritten_workqueue);
 603	destroy_workqueue(mp->m_buf_workqueue);
 604}
 605
 606static void
 607xfs_flush_inodes_worker(
 608	struct work_struct	*work)
 609{
 610	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
 611						   m_flush_inodes_work);
 612	struct super_block	*sb = mp->m_super;
 613
 614	if (down_read_trylock(&sb->s_umount)) {
 615		sync_inodes_sb(sb);
 616		up_read(&sb->s_umount);
 617	}
 618}
 619
 620/*
 621 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 622 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 623 * for IO to complete so that we effectively throttle multiple callers to the
 624 * rate at which IO is completing.
 625 */
 626void
 627xfs_flush_inodes(
 628	struct xfs_mount	*mp)
 629{
 630	/*
 631	 * If flush_work() returns true then that means we waited for a flush
 632	 * which was already in progress.  Don't bother running another scan.
 633	 */
 634	if (flush_work(&mp->m_flush_inodes_work))
 635		return;
 636
 637	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
 638	flush_work(&mp->m_flush_inodes_work);
 639}
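/*
 * Illustrative race between two callers: A queues the work item and
 * waits in flush_work(); B arrives while the scan is running, so B's
 * first flush_work() returns true once A's scan completes and B skips
 * queueing a second scan.  That is what throttles callers to the rate
 * at which inode writeback actually completes.
 */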
 640
 641/* Catch misguided souls that try to use this interface on XFS */
 642STATIC struct inode *
 643xfs_fs_alloc_inode(
 644	struct super_block	*sb)
 645{
 646	BUG();
 647	return NULL;
 648}
 649
 650/*
 651 * Now that the generic code is guaranteed not to be accessing
 652 * the linux inode, we can inactivate and reclaim the inode.
 653 */
 654STATIC void
 655xfs_fs_destroy_inode(
 656	struct inode		*inode)
 657{
 658	struct xfs_inode	*ip = XFS_I(inode);
 659
 660	trace_xfs_destroy_inode(ip);
 661
 662	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 663	XFS_STATS_INC(ip->i_mount, vn_rele);
 664	XFS_STATS_INC(ip->i_mount, vn_remove);
 665	xfs_inode_mark_reclaimable(ip);
 666}
 667
 668static void
 669xfs_fs_dirty_inode(
 670	struct inode			*inode,
 671	int				flags)
 672{
 673	struct xfs_inode		*ip = XFS_I(inode);
 674	struct xfs_mount		*mp = ip->i_mount;
 675	struct xfs_trans		*tp;
 676
 677	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
 678		return;
 679
 680	/*
 681	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
 682	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
 683	 * in flags possibly together with I_DIRTY_SYNC.
 684	 */
 685	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
 686		return;
 687
 688	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
 689		return;
 690	xfs_ilock(ip, XFS_ILOCK_EXCL);
 691	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 692	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 693	xfs_trans_commit(tp);
 694}
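/*
 * The flag filter above, spelled out (illustrative): only
 * I_DIRTY_TIME | I_DIRTY_SYNC reaches the transaction.  I_DIRTY_TIME
 * alone fails the first test and plain I_DIRTY_SYNC fails the second,
 * since ordinary metadata dirtying is already journalled through
 * transactions and a lazy timestamp is persisted only once the VFS
 * also marks the inode sync-dirty.
 */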
 695
 696/*
 697 * Slab object creation initialisation for the XFS inode.
 698 * This covers only the idempotent fields in the XFS inode;
 699 * all other fields need to be initialised on allocation
 700 * from the slab. This avoids the need to repeatedly initialise
 701 * fields in the xfs inode that are left in the initialised state
 702 * when freeing the inode.
 703 */
 704STATIC void
 705xfs_fs_inode_init_once(
 706	void			*inode)
 707{
 708	struct xfs_inode	*ip = inode;
 709
 710	memset(ip, 0, sizeof(struct xfs_inode));
 711
 712	/* vfs inode */
 713	inode_init_once(VFS_I(ip));
 714
 715	/* xfs inode */
 716	atomic_set(&ip->i_pincount, 0);
 717	spin_lock_init(&ip->i_flags_lock);
 718
 719	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 720		     "xfsino", ip->i_ino);
 721}
 722
 723/*
 724 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 725 * serialised against cache hits here via the inode->i_lock and igrab() in
 726 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 727 * racing with us, and it avoids needing to grab a spinlock here for every inode
 728 * we drop the final reference on.
 729 */
 730STATIC int
 731xfs_fs_drop_inode(
 732	struct inode		*inode)
 733{
 734	struct xfs_inode	*ip = XFS_I(inode);
 735
 736	/*
 737	 * If this unlinked inode is in the middle of recovery, don't
 738	 * drop the inode just yet; log recovery will take care of
 739	 * that.  See the comment for this inode flag.
 740	 */
 741	if (ip->i_flags & XFS_IRECOVERY) {
 742		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
 743		return 0;
 744	}
 745
 746	return generic_drop_inode(inode);
 747}
 748
 749static void
 750xfs_mount_free(
 751	struct xfs_mount	*mp)
 752{
 753	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
 754		xfs_free_buftarg(mp->m_logdev_targp);
 755	if (mp->m_rtdev_targp)
 756		xfs_free_buftarg(mp->m_rtdev_targp);
 757	if (mp->m_ddev_targp)
 758		xfs_free_buftarg(mp->m_ddev_targp);
 759
 760	debugfs_remove(mp->m_debugfs);
 761	kfree(mp->m_rtname);
 762	kfree(mp->m_logname);
 763	kmem_free(mp);
 764}
 765
 766STATIC int
 767xfs_fs_sync_fs(
 768	struct super_block	*sb,
 769	int			wait)
 770{
 771	struct xfs_mount	*mp = XFS_M(sb);
 772	int			error;
 773
 774	trace_xfs_fs_sync_fs(mp, __return_address);
 775
 776	/*
 777	 * Doing anything during the async pass would be counterproductive.
 778	 */
 779	if (!wait)
 780		return 0;
 781
 782	error = xfs_log_force(mp, XFS_LOG_SYNC);
 783	if (error)
 784		return error;
 785
 786	if (laptop_mode) {
 787		/*
 788		 * The disk must be active because we're syncing.
 789		 * We schedule log work now (now that the disk is
 790		 * active) instead of later (when it might not be).
 791		 */
 792		flush_delayed_work(&mp->m_log->l_work);
 793	}
 794
 795	/*
 796	 * If we are called with page faults frozen out, it means we are about
 797	 * to freeze the transaction subsystem. Take the opportunity to shut
 798	 * down inodegc because once SB_FREEZE_FS is set it's too late to
 799	 * prevent inactivation races with freeze. The fs doesn't get called
 800	 * again by the freezing process until after SB_FREEZE_FS has been set,
 801	 * so it's now or never.  Same logic applies to speculative allocation
 802	 * garbage collection.
 803	 *
 804	 * We don't care if this is a normal syncfs call that does this or
 805	 * freeze that does this - we can run this multiple times without issue
 806	 * and we won't race with a restart because a restart can only occur
 807	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
 808	 */
 809	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
 810		xfs_inodegc_stop(mp);
 811		xfs_blockgc_stop(mp);
 812	}
 813
 814	return 0;
 815}
 816
 817STATIC int
 818xfs_fs_statfs(
 819	struct dentry		*dentry,
 820	struct kstatfs		*statp)
 821{
 822	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
 823	xfs_sb_t		*sbp = &mp->m_sb;
 824	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 825	uint64_t		fakeinos, id;
 826	uint64_t		icount;
 827	uint64_t		ifree;
 828	uint64_t		fdblocks;
 829	xfs_extlen_t		lsize;
 830	int64_t			ffree;
 831
 832	/*
 833	 * Expedite background inodegc but don't wait. We do not want to block
 834	 * here waiting hours for a billion extent file to be truncated.
 835	 */
 836	xfs_inodegc_push(mp);
 837
 838	statp->f_type = XFS_SUPER_MAGIC;
 839	statp->f_namelen = MAXNAMELEN - 1;
 840
 841	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
 842	statp->f_fsid = u64_to_fsid(id);
 843
 844	icount = percpu_counter_sum(&mp->m_icount);
 845	ifree = percpu_counter_sum(&mp->m_ifree);
 846	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
 847
 848	spin_lock(&mp->m_sb_lock);
 849	statp->f_bsize = sbp->sb_blocksize;
 850	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 851	statp->f_blocks = sbp->sb_dblocks - lsize;
 852	spin_unlock(&mp->m_sb_lock);
 853
 854	/* make sure statp->f_bfree does not underflow */
 855	statp->f_bfree = max_t(int64_t, 0,
 856				fdblocks - xfs_fdblocks_unavailable(mp));
 857	statp->f_bavail = statp->f_bfree;
 858
 859	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
 860	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
 861	if (M_IGEO(mp)->maxicount)
 862		statp->f_files = min_t(typeof(statp->f_files),
 863					statp->f_files,
 864					M_IGEO(mp)->maxicount);
 865
 866	/* If sb_icount overshot maxicount, report actual allocation */
 867	statp->f_files = max_t(typeof(statp->f_files),
 868					statp->f_files,
 869					sbp->sb_icount);
 870
 871	/* make sure statp->f_ffree does not underflow */
 872	ffree = statp->f_files - (icount - ifree);
 873	statp->f_ffree = max_t(int64_t, ffree, 0);
 874
 875
 876	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
 877	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
 878			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
 879		xfs_qm_statvfs(ip, statp);
 880
 881	if (XFS_IS_REALTIME_MOUNT(mp) &&
 882	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
 883		s64	freertx;
 884
 885		statp->f_blocks = sbp->sb_rblocks;
 886		freertx = percpu_counter_sum_positive(&mp->m_frextents);
 887		statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
 888	}
 889
 890	return 0;
 891}
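/*
 * Worked example with hypothetical counters: fdblocks = 1000 and
 * xfs_fdblocks_unavailable() = 64 give f_bfree = 936; fakeinos is then
 * however many inodes those 936 blocks could still hold, so f_files =
 * min(icount + fakeinos, XFS_MAXINUMBER), clamped further to maxicount
 * when an inode ceiling is configured and raised back to sb_icount if
 * allocation already overshot it.
 */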
 892
 893STATIC void
 894xfs_save_resvblks(struct xfs_mount *mp)
 895{
 896	mp->m_resblks_save = mp->m_resblks;
 897	xfs_reserve_blocks(mp, 0);
 898}
 899
 900STATIC void
 901xfs_restore_resvblks(struct xfs_mount *mp)
 902{
 903	uint64_t resblks;
 904
 905	if (mp->m_resblks_save) {
 906		resblks = mp->m_resblks_save;
 907		mp->m_resblks_save = 0;
 908	} else
 909		resblks = xfs_default_resblks(mp);
 910
 911	xfs_reserve_blocks(mp, resblks);
 912}
 913
 914/*
 915 * Second stage of a freeze. The data is already frozen so we only
 916 * need to take care of the metadata. Once that's done sync the superblock
 917 * to the log to dirty it in case of a crash while frozen. This ensures that we
 918 * will recover the unlinked inode lists on the next mount.
 919 */
 920STATIC int
 921xfs_fs_freeze(
 922	struct super_block	*sb)
 923{
 924	struct xfs_mount	*mp = XFS_M(sb);
 925	unsigned int		flags;
 926	int			ret;
 927
 928	/*
 929	 * The filesystem is now frozen far enough that memory reclaim
 930	 * cannot safely operate on the filesystem. Hence we need to
 931	 * set a GFP_NOFS context here to avoid recursion deadlocks.
 932	 */
 933	flags = memalloc_nofs_save();
 934	xfs_save_resvblks(mp);
 935	ret = xfs_log_quiesce(mp);
 936	memalloc_nofs_restore(flags);
 937
 938	/*
 939	 * For read-write filesystems, we need to restart the inodegc on error
 940	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
 941	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
 942	 * here, so we can restart safely without racing with a stop in
 943	 * xfs_fs_sync_fs().
 944	 */
 945	if (ret && !xfs_is_readonly(mp)) {
 946		xfs_blockgc_start(mp);
 947		xfs_inodegc_start(mp);
 948	}
 949
 950	return ret;
 951}
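/*
 * Freeze ordering sketch: the VFS advances through SB_FREEZE_WRITE,
 * SB_FREEZE_PAGEFAULT and SB_FREEZE_FS.  xfs_fs_sync_fs() stops
 * inodegc and blockgc at the PAGEFAULT stage; the error path above
 * restarts them because a failed freeze never reaches the thaw that
 * would otherwise do so.
 */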
 952
 953STATIC int
 954xfs_fs_unfreeze(
 955	struct super_block	*sb)
 956{
 957	struct xfs_mount	*mp = XFS_M(sb);
 958
 959	xfs_restore_resvblks(mp);
 960	xfs_log_work_queue(mp);
 961
 962	/*
 963	 * Don't reactivate the inodegc worker on a readonly filesystem because
 964	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
 965	 * worker because there are no speculative preallocations on a readonly
 966	 * filesystem.
 967	 */
 968	if (!xfs_is_readonly(mp)) {
 969		xfs_blockgc_start(mp);
 970		xfs_inodegc_start(mp);
 971	}
 972
 973	return 0;
 974}
 975
 976/*
 977 * This function fills in xfs_mount_t fields based on mount args.
 978 * Note: the superblock _has_ now been read in.
 979 */
 980STATIC int
 981xfs_finish_flags(
 982	struct xfs_mount	*mp)
 983{
 984	/* Fail a mount where the logbuf is smaller than the log stripe */
 985	if (xfs_has_logv2(mp)) {
 986		if (mp->m_logbsize <= 0 &&
 987		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
 988			mp->m_logbsize = mp->m_sb.sb_logsunit;
 989		} else if (mp->m_logbsize > 0 &&
 990			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
 991			xfs_warn(mp,
 992		"logbuf size must be greater than or equal to log stripe size");
 993			return -EINVAL;
 994		}
 995	} else {
 996		/* Fail a mount if the logbuf is larger than 32K */
 997		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
 998			xfs_warn(mp,
 999		"logbuf size for version 1 logs must be 16K or 32K");
1000			return -EINVAL;
1001		}
1002	}
1003
1004	/*
1005	 * V5 filesystems always use attr2 format for attributes.
1006	 */
1007	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1008		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1009			     "attr2 is always enabled for V5 filesystems.");
1010		return -EINVAL;
1011	}
1012
1013	/*
1014	 * prohibit r/w mounts of read-only filesystems
1015	 */
1016	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1017		xfs_warn(mp,
1018			"cannot mount a read-only filesystem as read-write");
1019		return -EROFS;
1020	}
1021
1022	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1023	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1024	    !xfs_has_pquotino(mp)) {
1025		xfs_warn(mp,
1026		  "Super block does not support project and group quota together");
1027		return -EINVAL;
1028	}
1029
1030	return 0;
1031}
1032
1033static int
1034xfs_init_percpu_counters(
1035	struct xfs_mount	*mp)
1036{
1037	int		error;
1038
1039	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1040	if (error)
1041		return -ENOMEM;
1042
1043	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1044	if (error)
1045		goto free_icount;
1046
1047	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1048	if (error)
1049		goto free_ifree;
1050
1051	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1052	if (error)
1053		goto free_fdblocks;
1054
1055	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1056	if (error)
1057		goto free_delalloc;
1058
1059	return 0;
1060
1061free_delalloc:
1062	percpu_counter_destroy(&mp->m_delalloc_blks);
1063free_fdblocks:
1064	percpu_counter_destroy(&mp->m_fdblocks);
1065free_ifree:
1066	percpu_counter_destroy(&mp->m_ifree);
1067free_icount:
1068	percpu_counter_destroy(&mp->m_icount);
1069	return -ENOMEM;
1070}
1071
1072void
1073xfs_reinit_percpu_counters(
1074	struct xfs_mount	*mp)
1075{
1076	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1077	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1078	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1079	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1080}
1081
1082static void
1083xfs_destroy_percpu_counters(
1084	struct xfs_mount	*mp)
1085{
1086	percpu_counter_destroy(&mp->m_icount);
1087	percpu_counter_destroy(&mp->m_ifree);
1088	percpu_counter_destroy(&mp->m_fdblocks);
1089	ASSERT(xfs_is_shutdown(mp) ||
1090	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1091	percpu_counter_destroy(&mp->m_delalloc_blks);
1092	percpu_counter_destroy(&mp->m_frextents);
1093}
1094
1095static int
1096xfs_inodegc_init_percpu(
1097	struct xfs_mount	*mp)
1098{
1099	struct xfs_inodegc	*gc;
1100	int			cpu;
1101
1102	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1103	if (!mp->m_inodegc)
1104		return -ENOMEM;
1105
1106	for_each_possible_cpu(cpu) {
1107		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1108		gc->cpu = cpu;
1109		gc->mp = mp;
1110		init_llist_head(&gc->list);
1111		gc->items = 0;
1112		gc->error = 0;
1113		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1114	}
1115	return 0;
1116}
1117
1118static void
1119xfs_inodegc_free_percpu(
1120	struct xfs_mount	*mp)
1121{
1122	if (!mp->m_inodegc)
1123		return;
1124	free_percpu(mp->m_inodegc);
1125}
1126
1127static void
1128xfs_fs_put_super(
1129	struct super_block	*sb)
1130{
1131	struct xfs_mount	*mp = XFS_M(sb);
1132
1133	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1134	xfs_filestream_unmount(mp);
1135	xfs_unmountfs(mp);
1136
1137	xfs_freesb(mp);
1138	xchk_mount_stats_free(mp);
1139	free_percpu(mp->m_stats.xs_stats);
1140	xfs_inodegc_free_percpu(mp);
1141	xfs_destroy_percpu_counters(mp);
1142	xfs_destroy_mount_workqueues(mp);
1143	xfs_shutdown_devices(mp);
1144}
1145
1146static long
1147xfs_fs_nr_cached_objects(
1148	struct super_block	*sb,
1149	struct shrink_control	*sc)
1150{
1151	/* Paranoia: catch incorrect calls during mount setup or teardown */
1152	if (WARN_ON_ONCE(!sb->s_fs_info))
1153		return 0;
1154	return xfs_reclaim_inodes_count(XFS_M(sb));
1155}
1156
1157static long
1158xfs_fs_free_cached_objects(
1159	struct super_block	*sb,
1160	struct shrink_control	*sc)
1161{
1162	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1163}
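/*
 * Shrinker contract, illustrative: the VFS calls nr_cached_objects()
 * to estimate the reclaimable-inode pool, then free_cached_objects()
 * with sc->nr_to_scan set to a batch of that estimate.  The return
 * value reports how many inodes were actually reclaimed, which feeds
 * back into the decision to keep scanning.
 */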
1164
1165static void
1166xfs_fs_shutdown(
1167	struct super_block	*sb)
1168{
1169	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1170}
1171
1172static const struct super_operations xfs_super_operations = {
1173	.alloc_inode		= xfs_fs_alloc_inode,
1174	.destroy_inode		= xfs_fs_destroy_inode,
1175	.dirty_inode		= xfs_fs_dirty_inode,
1176	.drop_inode		= xfs_fs_drop_inode,
1177	.put_super		= xfs_fs_put_super,
1178	.sync_fs		= xfs_fs_sync_fs,
1179	.freeze_fs		= xfs_fs_freeze,
1180	.unfreeze_fs		= xfs_fs_unfreeze,
1181	.statfs			= xfs_fs_statfs,
1182	.show_options		= xfs_fs_show_options,
1183	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1184	.free_cached_objects	= xfs_fs_free_cached_objects,
1185	.shutdown		= xfs_fs_shutdown,
1186};
1187
1188static int
1189suffix_kstrtoint(
1190	const char	*s,
1191	unsigned int	base,
1192	int		*res)
1193{
1194	int		last, shift_left_factor = 0, _res;
1195	char		*value;
1196	int		ret = 0;
1197
1198	value = kstrdup(s, GFP_KERNEL);
1199	if (!value)
1200		return -ENOMEM;
1201
1202	last = strlen(value) - 1;
1203	if (value[last] == 'K' || value[last] == 'k') {
1204		shift_left_factor = 10;
1205		value[last] = '\0';
1206	}
1207	if (value[last] == 'M' || value[last] == 'm') {
1208		shift_left_factor = 20;
1209		value[last] = '\0';
1210	}
1211	if (value[last] == 'G' || value[last] == 'g') {
1212		shift_left_factor = 30;
1213		value[last] = '\0';
1214	}
1215
1216	if (kstrtoint(value, base, &_res))
1217		ret = -EINVAL;
1218	kfree(value);
1219	*res = _res << shift_left_factor;
1220	return ret;
1221}
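/*
 * Example values (hypothetical): "32k" strips the suffix, parses 32
 * and returns 32 << 10 = 32768; "1g" yields 1 << 30.  A bare "4096"
 * passes through kstrtoint() unshifted.  The shift assumes the parsed
 * number is small enough that the result still fits in an int.
 */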
1222
1223static inline void
1224xfs_fs_warn_deprecated(
1225	struct fs_context	*fc,
1226	struct fs_parameter	*param,
1227	uint64_t		flag,
1228	bool			value)
1229{
1230	/* Don't print the warning if reconfiguring and current mount point
1231	 * already had the flag set
1232	 */
1233	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1234            !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1235		return;
1236	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1237}
1238
1239/*
1240 * Set mount state from a mount option.
1241 *
1242 * NOTE: mp->m_super is NULL here!
1243 */
1244static int
1245xfs_fs_parse_param(
1246	struct fs_context	*fc,
1247	struct fs_parameter	*param)
1248{
1249	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1250	struct fs_parse_result	result;
1251	int			size = 0;
1252	int			opt;
1253
1254	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1255	if (opt < 0)
1256		return opt;
1257
1258	switch (opt) {
1259	case Opt_logbufs:
1260		parsing_mp->m_logbufs = result.uint_32;
1261		return 0;
1262	case Opt_logbsize:
1263		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1264			return -EINVAL;
1265		return 0;
1266	case Opt_logdev:
1267		kfree(parsing_mp->m_logname);
1268		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1269		if (!parsing_mp->m_logname)
1270			return -ENOMEM;
1271		return 0;
1272	case Opt_rtdev:
1273		kfree(parsing_mp->m_rtname);
1274		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1275		if (!parsing_mp->m_rtname)
1276			return -ENOMEM;
1277		return 0;
1278	case Opt_allocsize:
1279		if (suffix_kstrtoint(param->string, 10, &size))
1280			return -EINVAL;
1281		parsing_mp->m_allocsize_log = ffs(size) - 1;
1282		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1283		return 0;
1284	case Opt_grpid:
1285	case Opt_bsdgroups:
1286		parsing_mp->m_features |= XFS_FEAT_GRPID;
1287		return 0;
1288	case Opt_nogrpid:
1289	case Opt_sysvgroups:
1290		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1291		return 0;
1292	case Opt_wsync:
1293		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1294		return 0;
1295	case Opt_norecovery:
1296		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1297		return 0;
1298	case Opt_noalign:
1299		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1300		return 0;
1301	case Opt_swalloc:
1302		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1303		return 0;
1304	case Opt_sunit:
1305		parsing_mp->m_dalign = result.uint_32;
1306		return 0;
1307	case Opt_swidth:
1308		parsing_mp->m_swidth = result.uint_32;
1309		return 0;
1310	case Opt_inode32:
1311		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1312		return 0;
1313	case Opt_inode64:
1314		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1315		return 0;
1316	case Opt_nouuid:
1317		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1318		return 0;
1319	case Opt_largeio:
1320		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1321		return 0;
1322	case Opt_nolargeio:
1323		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1324		return 0;
1325	case Opt_filestreams:
1326		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1327		return 0;
1328	case Opt_noquota:
1329		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1330		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1331		return 0;
1332	case Opt_quota:
1333	case Opt_uquota:
1334	case Opt_usrquota:
1335		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1336		return 0;
1337	case Opt_qnoenforce:
1338	case Opt_uqnoenforce:
1339		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1340		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1341		return 0;
1342	case Opt_pquota:
1343	case Opt_prjquota:
1344		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1345		return 0;
1346	case Opt_pqnoenforce:
1347		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1348		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1349		return 0;
1350	case Opt_gquota:
1351	case Opt_grpquota:
1352		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1353		return 0;
1354	case Opt_gqnoenforce:
1355		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1356		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1357		return 0;
1358	case Opt_discard:
1359		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1360		return 0;
1361	case Opt_nodiscard:
1362		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1363		return 0;
1364#ifdef CONFIG_FS_DAX
1365	case Opt_dax:
1366		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1367		return 0;
1368	case Opt_dax_enum:
1369		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1370		return 0;
1371#endif
1372	/* Following mount options will be removed in September 2025 */
1373	case Opt_ikeep:
1374		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1375		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1376		return 0;
1377	case Opt_noikeep:
1378		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1379		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1380		return 0;
1381	case Opt_attr2:
1382		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1383		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1384		return 0;
1385	case Opt_noattr2:
1386		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1387		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1388		return 0;
1389	default:
1390		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1391		return -EINVAL;
1392	}
1393
1394	return 0;
1395}
1396
1397static int
1398xfs_fs_validate_params(
1399	struct xfs_mount	*mp)
1400{
1401	/* No recovery flag requires a read-only mount */
1402	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1403		xfs_warn(mp, "no-recovery mounts must be read-only.");
1404		return -EINVAL;
1405	}
1406
1407	/*
1408	 * We have not read the superblock at this point, so only the attr2
1409	 * mount option can set the attr2 feature by this stage.
1410	 */
1411	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1412		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1413		return -EINVAL;
1414	}
1415
1416
1417	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1418		xfs_warn(mp,
1419	"sunit and swidth options incompatible with the noalign option");
1420		return -EINVAL;
1421	}
1422
1423	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1424		xfs_warn(mp, "quota support not available in this kernel.");
1425		return -EINVAL;
1426	}
1427
1428	if ((mp->m_dalign && !mp->m_swidth) ||
1429	    (!mp->m_dalign && mp->m_swidth)) {
1430		xfs_warn(mp, "sunit and swidth must be specified together");
1431		return -EINVAL;
1432	}
1433
1434	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1435		xfs_warn(mp,
1436	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1437			mp->m_swidth, mp->m_dalign);
1438		return -EINVAL;
1439	}
1440
1441	if (mp->m_logbufs != -1 &&
1442	    mp->m_logbufs != 0 &&
1443	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1444	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1445		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1446			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1447		return -EINVAL;
1448	}
1449
1450	if (mp->m_logbsize != -1 &&
1451	    mp->m_logbsize !=  0 &&
1452	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1453	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1454	     !is_power_of_2(mp->m_logbsize))) {
1455		xfs_warn(mp,
1456			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1457			mp->m_logbsize);
1458		return -EINVAL;
1459	}
1460
1461	if (xfs_has_allocsize(mp) &&
1462	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1463	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1464		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1465			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1466		return -EINVAL;
1467	}
1468
1469	return 0;
1470}
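
/*
 * Example of the stripe geometry checks above (illustrative values):
 * "-o sunit=64,swidth=256" passes because both options are present and
 * 256 is a multiple of 64, while "-o sunit=64" alone trips the "sunit
 * and swidth must be specified together" case.
 */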
1471
1472struct dentry *
1473xfs_debugfs_mkdir(
1474	const char	*name,
1475	struct dentry	*parent)
1476{
1477	struct dentry	*child;
1478
1479	/* Apparently we're expected to ignore error returns?? */
1480	child = debugfs_create_dir(name, parent);
1481	if (IS_ERR(child))
1482		return NULL;
1483
1484	return child;
1485}
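
/*
 * Note: debugfs is deliberately best-effort; its documentation asks
 * callers not to propagate errors from debugfs_create_*(), which is why
 * the wrapper above flattens IS_ERR() results to NULL instead of
 * failing the mount over a missing debug directory.
 */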
1486
1487static int
1488xfs_fs_fill_super(
1489	struct super_block	*sb,
1490	struct fs_context	*fc)
1491{
1492	struct xfs_mount	*mp = sb->s_fs_info;
1493	struct inode		*root;
1494	int			flags = 0, error;
1495
1496	mp->m_super = sb;
1497
1498	/*
1499	 * Copy VFS mount flags from the context now that all parameter parsing
1500	 * is guaranteed to have been completed by either the old mount API or
1501	 * the newer fsopen/fsconfig API.
1502	 */
1503	if (fc->sb_flags & SB_RDONLY)
1504		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1505	if (fc->sb_flags & SB_DIRSYNC)
1506		mp->m_features |= XFS_FEAT_DIRSYNC;
1507	if (fc->sb_flags & SB_SYNCHRONOUS)
1508		mp->m_features |= XFS_FEAT_WSYNC;
1509
1510	error = xfs_fs_validate_params(mp);
1511	if (error)
1512		return error;
1513
1514	sb_min_blocksize(sb, BBSIZE);
1515	sb->s_xattr = xfs_xattr_handlers;
1516	sb->s_export_op = &xfs_export_operations;
1517#ifdef CONFIG_XFS_QUOTA
1518	sb->s_qcop = &xfs_quotactl_operations;
1519	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1520#endif
1521	sb->s_op = &xfs_super_operations;
1522
1523	/*
1524	 * Delay mount work if the debug hook is set. This is debug
1525	 * instrumentation to coordinate simulation of xfs mount failures with
1526	 * VFS superblock operations.
1527	 */
1528	if (xfs_globals.mount_delay) {
1529		xfs_notice(mp, "Delaying mount for %d seconds.",
1530			xfs_globals.mount_delay);
1531		msleep(xfs_globals.mount_delay * 1000);
1532	}
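	/*
	 * (On DEBUG kernels the delay is normally armed via sysfs before
	 * mounting, e.g. "echo 30 > /sys/fs/xfs/debug/mount_delay"; the
	 * path is shown for illustration.)
	 */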
1533
1534	if (fc->sb_flags & SB_SILENT)
1535		flags |= XFS_MFSI_QUIET;
1536
1537	error = xfs_open_devices(mp);
1538	if (error)
1539		return error;
1540
1541	if (xfs_debugfs) {
1542		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1543						  xfs_debugfs);
1544	} else {
1545		mp->m_debugfs = NULL;
1546	}
1547
1548	error = xfs_init_mount_workqueues(mp);
1549	if (error)
1550		goto out_shutdown_devices;
1551
1552	error = xfs_init_percpu_counters(mp);
1553	if (error)
1554		goto out_destroy_workqueues;
1555
1556	error = xfs_inodegc_init_percpu(mp);
1557	if (error)
1558		goto out_destroy_counters;
1559
1560	/* Allocate stats memory before we do operations that might use it */
1561	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1562	if (!mp->m_stats.xs_stats) {
1563		error = -ENOMEM;
1564		goto out_destroy_inodegc;
1565	}
1566
1567	error = xchk_mount_stats_alloc(mp);
1568	if (error)
1569		goto out_free_stats;
1570
1571	error = xfs_readsb(mp, flags);
1572	if (error)
1573		goto out_free_scrub_stats;
1574
1575	error = xfs_finish_flags(mp);
1576	if (error)
1577		goto out_free_sb;
1578
1579	error = xfs_setup_devices(mp);
1580	if (error)
1581		goto out_free_sb;
1582
1583	/* V4 support is undergoing deprecation. */
1584	if (!xfs_has_crc(mp)) {
1585#ifdef CONFIG_XFS_SUPPORT_V4
1586		xfs_warn_once(mp,
1587	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1588#else
1589		xfs_warn(mp,
1590	"Deprecated V4 format (crc=0) not supported by kernel.");
1591		error = -EINVAL;
1592		goto out_free_sb;
1593#endif
1594	}
1595
1596	/* ASCII case insensitivity is undergoing deprecation. */
1597	if (xfs_has_asciici(mp)) {
1598#ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1599		xfs_warn_once(mp,
1600	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1601#else
1602		xfs_warn(mp,
1603	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1604		error = -EINVAL;
1605		goto out_free_sb;
1606#endif
1607	}
1608
1609	/* Filesystem claims it needs repair, so refuse the mount. */
1610	if (xfs_has_needsrepair(mp)) {
1611		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1612		error = -EFSCORRUPTED;
1613		goto out_free_sb;
1614	}
1615
1616	/*
1617	 * Don't touch the filesystem if a user tool thinks it owns the primary
1618	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1619	 * we don't check them at all.
1620	 */
1621	if (mp->m_sb.sb_inprogress) {
1622		xfs_warn(mp, "Offline file system operation in progress!");
1623		error = -EFSCORRUPTED;
1624		goto out_free_sb;
1625	}
1626
1627	/*
1628	 * Until this is fixed only page-sized or smaller data blocks work.
1629	 */
1630	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1631		xfs_warn(mp,
1632		"File system with blocksize %d bytes. "
1633		"Only pagesize (%ld) or less will currently work.",
1634				mp->m_sb.sb_blocksize, PAGE_SIZE);
1635		error = -ENOSYS;
1636		goto out_free_sb;
1637	}
1638
1639	/* Ensure this filesystem fits in the page cache limits */
1640	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1641	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1642		xfs_warn(mp,
1643		"file system too large to be mounted on this system.");
1644		error = -EFBIG;
1645		goto out_free_sb;
1646	}
1647
1648	/*
1649	 * XFS block mappings use 54 bits to store the logical block offset.
1650	 * This should suffice to handle the maximum file size that the VFS
1651	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1652	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1653	 * calculation wrong on 32-bit kernels in the past, we check this
1654	 * assertion here and refuse the mount if it fails.
1655	 *
1656	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1657	 * maximum pagecache offset in units of fs blocks.
1658	 */
1659	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1660		xfs_warn(mp,
1661"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1662			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1663			 XFS_MAX_FILEOFF);
1664		error = -EINVAL;
1665		goto out_free_sb;
1666	}
1667
1668	error = xfs_filestream_mount(mp);
1669	if (error)
1670		goto out_free_sb;
1671
1672	/*
1673	 * we must configure the block size in the superblock before we run the
1674	 * full mount process as the mount process can lookup and cache inodes.
1675	 */
1676	sb->s_magic = XFS_SUPER_MAGIC;
1677	sb->s_blocksize = mp->m_sb.sb_blocksize;
1678	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1679	sb->s_maxbytes = MAX_LFS_FILESIZE;
1680	sb->s_max_links = XFS_MAXLINK;
1681	sb->s_time_gran = 1;
1682	if (xfs_has_bigtime(mp)) {
1683		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1684		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1685	} else {
1686		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1687		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1688	}
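	/*
	 * For scale: the legacy format stores signed 32-bit seconds
	 * (roughly December 1901 to January 2038), while bigtime counts
	 * nanoseconds in an unsigned 64-bit field, extending the
	 * representable range out to the year 2486.
	 */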
1689	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1690	sb->s_iflags |= SB_I_CGROUPWB;
1691
1692	set_posix_acl_flag(sb);
1693
1694	/* version 5 superblocks support inode version counters. */
1695	if (xfs_has_crc(mp))
1696		sb->s_flags |= SB_I_VERSION;
1697
1698	if (xfs_has_dax_always(mp)) {
1699		error = xfs_setup_dax_always(mp);
1700		if (error)
1701			goto out_filestream_unmount;
1702	}
1703
1704	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1705		xfs_warn(mp,
1706	"mounting with \"discard\" option, but the device does not support discard");
1707		mp->m_features &= ~XFS_FEAT_DISCARD;
1708	}
1709
1710	if (xfs_has_reflink(mp)) {
1711		if (mp->m_sb.sb_rblocks) {
1712			xfs_alert(mp,
1713	"reflink not compatible with realtime device!");
1714			error = -EINVAL;
1715			goto out_filestream_unmount;
1716		}
1717
1718		if (xfs_globals.always_cow) {
1719			xfs_info(mp, "using DEBUG-only always_cow mode.");
1720			mp->m_always_cow = true;
1721		}
1722	}
1723
1724	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1725		xfs_alert(mp,
1726	"reverse mapping btree not compatible with realtime device!");
1727		error = -EINVAL;
1728		goto out_filestream_unmount;
1729	}
1730
1731	error = xfs_mountfs(mp);
1732	if (error)
1733		goto out_filestream_unmount;
1734
1735	root = igrab(VFS_I(mp->m_rootip));
1736	if (!root) {
1737		error = -ENOENT;
1738		goto out_unmount;
1739	}
1740	sb->s_root = d_make_root(root);
1741	if (!sb->s_root) {
1742		error = -ENOMEM;
1743		goto out_unmount;
1744	}
1745
1746	return 0;
1747
1748 out_filestream_unmount:
1749	xfs_filestream_unmount(mp);
1750 out_free_sb:
1751	xfs_freesb(mp);
1752 out_free_scrub_stats:
1753	xchk_mount_stats_free(mp);
1754 out_free_stats:
1755	free_percpu(mp->m_stats.xs_stats);
1756 out_destroy_inodegc:
1757	xfs_inodegc_free_percpu(mp);
1758 out_destroy_counters:
1759	xfs_destroy_percpu_counters(mp);
1760 out_destroy_workqueues:
1761	xfs_destroy_mount_workqueues(mp);
1762 out_shutdown_devices:
1763	xfs_shutdown_devices(mp);
1764	return error;
1765
1766 out_unmount:
1767	xfs_filestream_unmount(mp);
1768	xfs_unmountfs(mp);
1769	goto out_free_sb;
1770}
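
/*
 * Note the two unwind chains above: failures before xfs_mountfs() walk
 * the out_* labels in reverse order of setup, while failures after a
 * successful xfs_mountfs() enter at out_unmount and rejoin the common
 * chain at out_free_sb.
 */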
1771
1772static int
1773xfs_fs_get_tree(
1774	struct fs_context	*fc)
1775{
1776	return get_tree_bdev(fc, xfs_fs_fill_super);
1777}
1778
1779static int
1780xfs_remount_rw(
1781	struct xfs_mount	*mp)
1782{
1783	struct xfs_sb		*sbp = &mp->m_sb;
1784	int			error;
1785
1786	if (xfs_has_norecovery(mp)) {
1787		xfs_warn(mp,
1788			"ro->rw transition prohibited on norecovery mount");
1789		return -EINVAL;
1790	}
1791
1792	if (xfs_sb_is_v5(sbp) &&
1793	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1794		xfs_warn(mp,
1795	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1796			(sbp->sb_features_ro_compat &
1797				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1798		return -EINVAL;
1799	}
1800
1801	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1802
1803	/*
1804	 * If this is the first remount to writeable state we might have some
1805	 * superblock changes to update.
1806	 */
1807	if (mp->m_update_sb) {
1808		error = xfs_sync_sb(mp, false);
1809		if (error) {
1810			xfs_warn(mp, "failed to write sb changes");
1811			return error;
1812		}
1813		mp->m_update_sb = false;
1814	}
1815
1816	/*
1817	 * Fill out the reserve pool if it is empty. Use the stashed value if
1818	 * it is non-zero, otherwise go with the default.
1819	 */
1820	xfs_restore_resvblks(mp);
1821	xfs_log_work_queue(mp);
1822	xfs_blockgc_start(mp);
1823
1824	/* Create the per-AG metadata reservation pool. */
1825	error = xfs_fs_reserve_ag_blocks(mp);
1826	if (error && error != -ENOSPC)
1827		return error;
1828
1829	/* Re-enable the background inode inactivation worker. */
1830	xfs_inodegc_start(mp);
1831
1832	return 0;
1833}
1834
1835static int
1836xfs_remount_ro(
1837	struct xfs_mount	*mp)
1838{
1839	struct xfs_icwalk	icw = {
1840		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1841	};
1842	int			error;
1843
1844	/* Flush all the dirty data to disk. */
1845	error = sync_filesystem(mp->m_super);
1846	if (error)
1847		return error;
1848
1849	/*
1850	 * Cancel background eofb scanning so it cannot race with the final
1851	 * log force+buftarg wait and deadlock the remount.
1852	 */
1853	xfs_blockgc_stop(mp);
1854
1855	/*
1856	 * Clear out all remaining COW staging extents and speculative post-EOF
1857	 * preallocations so that we don't leave inodes requiring inactivation
1858	 * cleanups during reclaim on a read-only mount.  We must process every
1859	 * cached inode, so this requires a synchronous cache scan.
1860	 */
1861	error = xfs_blockgc_free_space(mp, &icw);
1862	if (error) {
1863		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1864		return error;
1865	}
1866
1867	/*
1868	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1869	 * flushed all pending inodegc work when it sync'd the filesystem.
1870	 * The VFS holds s_umount, so we know that inodes cannot enter
1871	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1872	 * we send inodes straight to reclaim, so no inodes will be queued.
1873	 */
1874	xfs_inodegc_stop(mp);
1875
1876	/* Free the per-AG metadata reservation pool. */
1877	error = xfs_fs_unreserve_ag_blocks(mp);
1878	if (error) {
1879		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1880		return error;
1881	}
1882
1883	/*
1884	 * Before we sync the metadata, we need to free up the reserve block
1885	 * pool so that the used block count in the superblock on disk is
1886	 * correct at the end of the remount. Stash the current reserve pool
1887	 * size so that if we get remounted rw, we can return it to the same
1888	 * size.
1889	 */
1890	xfs_save_resvblks(mp);
1891
1892	xfs_log_clean(mp);
1893	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1894
1895	return 0;
1896}
1897
1898/*
1899 * Logically we would return an error here to prevent users from believing
1900 * they have changed mount options via remount that in fact cannot be changed.
1901 *
1902 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1903 * arguments in some cases so we can't blindly reject options, but have to
1904 * check for each specified option if it actually differs from the currently
1905 * set option and only reject it if that's the case.
1906 *
1907 * Until that is implemented we return success for every remount request, and
1908 * silently ignore all options that we can't actually change.
1909 */
1910static int
1911xfs_fs_reconfigure(
1912	struct fs_context *fc)
1913{
1914	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1915	struct xfs_mount	*new_mp = fc->s_fs_info;
1916	int			flags = fc->sb_flags;
1917	int			error;
1918
1919	/* version 5 superblocks always support version counters. */
1920	if (xfs_has_crc(mp))
1921		fc->sb_flags |= SB_I_VERSION;
1922
1923	error = xfs_fs_validate_params(new_mp);
1924	if (error)
1925		return error;
1926
1927	/* inode32 -> inode64 */
1928	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1929		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1930		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1931	}
1932
1933	/* inode64 -> inode32 */
1934	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1935		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1936		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1937	}
1938
1939	/* ro -> rw */
1940	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1941		error = xfs_remount_rw(mp);
1942		if (error)
1943			return error;
1944	}
1945
1946	/* rw -> ro */
1947	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1948		error = xfs_remount_ro(mp);
1949		if (error)
1950			return error;
1951	}
1952
1953	return 0;
1954}
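
/*
 * Illustrative userspace triggers for the transitions handled above
 * (not part of this file):
 *
 *	mount -o remount,ro /mnt	->  xfs_remount_ro()
 *	mount -o remount,rw /mnt	->  xfs_remount_rw()
 */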
1955
1956static void
1957xfs_fs_free(
1958	struct fs_context	*fc)
1959{
1960	struct xfs_mount	*mp = fc->s_fs_info;
1961
1962	/*
1963	 * mp is stored in the fs_context when it is initialized.
1964	 * mp is transferred to the superblock on a successful mount,
1965	 * but if an error occurs before the transfer we have to free
1966	 * it here.
1967	 */
1968	if (mp)
1969		xfs_mount_free(mp);
1970}
1971
1972static const struct fs_context_operations xfs_context_ops = {
1973	.parse_param = xfs_fs_parse_param,
1974	.get_tree    = xfs_fs_get_tree,
1975	.reconfigure = xfs_fs_reconfigure,
1976	.free        = xfs_fs_free,
1977};
1978
1979/*
1980 * WARNING: do not initialise any parameters in this function that depend on
1981 * mount option parsing having already been performed, as this can be called
1982 * from fsopen() before any parameters have been set.
1983 */
1984static int xfs_init_fs_context(
1985	struct fs_context	*fc)
1986{
1987	struct xfs_mount	*mp;
1988
1989	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1990	if (!mp)
1991		return -ENOMEM;
1992
1993	spin_lock_init(&mp->m_sb_lock);
1994	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1995	spin_lock_init(&mp->m_perag_lock);
1996	mutex_init(&mp->m_growlock);
1997	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1998	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1999	mp->m_kobj.kobject.kset = xfs_kset;
2000	/*
2001	 * We don't create the finobt per-ag space reservation until after log
2002	 * recovery, so we must set this to true so that an ifree transaction
2003	 * started during log recovery will not depend on space reservations
2004	 * for finobt expansion.
2005	 */
2006	mp->m_finobt_nores = true;
2007
2008	/*
2009	 * These can be overridden by the mount option parsing.
2010	 */
2011	mp->m_logbufs = -1;
2012	mp->m_logbsize = -1;
2013	mp->m_allocsize_log = 16; /* 64k */
2014
2015	fc->s_fs_info = mp;
2016	fc->ops = &xfs_context_ops;
2017
2018	return 0;
2019}
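
/*
 * Sketch of the new-mount-API sequence that exercises this context
 * (illustrative, error handling omitted):
 *
 *	fd = fsopen("xfs", FSOPEN_CLOEXEC);		// xfs_init_fs_context()
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
 *	fsconfig(fd, FSCONFIG_SET_FLAG, "discard", NULL, 0);
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); // xfs_fs_get_tree()
 *	mfd = fsmount(fd, FSMOUNT_CLOEXEC, 0);
 */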
2020
2021static void
2022xfs_kill_sb(
2023	struct super_block		*sb)
2024{
2025	kill_block_super(sb);
2026	xfs_mount_free(XFS_M(sb));
2027}
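
/*
 * kill_block_super() above runs generic_shutdown_super(), which invokes
 * ->put_super() to tear down the active mount; xfs_mount_free() then
 * releases the xfs_mount stashed in sb->s_fs_info.
 */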
2028
2029static struct file_system_type xfs_fs_type = {
2030	.owner			= THIS_MODULE,
2031	.name			= "xfs",
2032	.init_fs_context	= xfs_init_fs_context,
2033	.parameters		= xfs_fs_parameters,
2034	.kill_sb		= xfs_kill_sb,
2035	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
2036};
2037MODULE_ALIAS_FS("xfs");
2038
2039STATIC int __init
2040xfs_init_caches(void)
2041{
2042	int		error;
2043
2044	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2045					 SLAB_HWCACHE_ALIGN |
2046					 SLAB_RECLAIM_ACCOUNT |
2047					 SLAB_MEM_SPREAD,
2048					 NULL);
2049	if (!xfs_buf_cache)
2050		goto out;
2051
2052	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2053						sizeof(struct xlog_ticket),
2054						0, 0, NULL);
2055	if (!xfs_log_ticket_cache)
2056		goto out_destroy_buf_cache;
2057
2058	error = xfs_btree_init_cur_caches();
2059	if (error)
2060		goto out_destroy_log_ticket_cache;
2061
2062	error = xfs_defer_init_item_caches();
2063	if (error)
2064		goto out_destroy_btree_cur_cache;
2065
2066	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2067					      sizeof(struct xfs_da_state),
2068					      0, 0, NULL);
2069	if (!xfs_da_state_cache)
2070		goto out_destroy_defer_item_cache;
2071
2072	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2073					   sizeof(struct xfs_ifork),
2074					   0, 0, NULL);
2075	if (!xfs_ifork_cache)
2076		goto out_destroy_da_state_cache;
2077
2078	xfs_trans_cache = kmem_cache_create("xfs_trans",
2079					   sizeof(struct xfs_trans),
2080					   0, 0, NULL);
2081	if (!xfs_trans_cache)
2082		goto out_destroy_ifork_cache;
2083
2084
2085	/*
2086	 * The size of the cache-allocated buf log item is the maximum
2087	 * size possible under XFS.  This wastes a little bit of memory,
2088	 * but it is much faster.
2089	 */
2090	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2091					      sizeof(struct xfs_buf_log_item),
2092					      0, 0, NULL);
2093	if (!xfs_buf_item_cache)
2094		goto out_destroy_trans_cache;
2095
2096	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2097			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2098			0, 0, NULL);
2099	if (!xfs_efd_cache)
2100		goto out_destroy_buf_item_cache;
2101
2102	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2103			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2104			0, 0, NULL);
2105	if (!xfs_efi_cache)
2106		goto out_destroy_efd_cache;
2107
2108	xfs_inode_cache = kmem_cache_create("xfs_inode",
2109					   sizeof(struct xfs_inode), 0,
2110					   (SLAB_HWCACHE_ALIGN |
2111					    SLAB_RECLAIM_ACCOUNT |
2112					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2113					   xfs_fs_inode_init_once);
2114	if (!xfs_inode_cache)
2115		goto out_destroy_efi_cache;
2116
2117	xfs_ili_cache = kmem_cache_create("xfs_ili",
2118					 sizeof(struct xfs_inode_log_item), 0,
2119					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2120					 NULL);
2121	if (!xfs_ili_cache)
2122		goto out_destroy_inode_cache;
2123
2124	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2125					     sizeof(struct xfs_icreate_item),
2126					     0, 0, NULL);
2127	if (!xfs_icreate_cache)
2128		goto out_destroy_ili_cache;
2129
2130	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2131					 sizeof(struct xfs_rud_log_item),
2132					 0, 0, NULL);
2133	if (!xfs_rud_cache)
2134		goto out_destroy_icreate_cache;
2135
2136	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2137			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2138			0, 0, NULL);
2139	if (!xfs_rui_cache)
2140		goto out_destroy_rud_cache;
2141
2142	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2143					 sizeof(struct xfs_cud_log_item),
2144					 0, 0, NULL);
2145	if (!xfs_cud_cache)
2146		goto out_destroy_rui_cache;
2147
2148	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2149			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2150			0, 0, NULL);
2151	if (!xfs_cui_cache)
2152		goto out_destroy_cud_cache;
2153
2154	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2155					 sizeof(struct xfs_bud_log_item),
2156					 0, 0, NULL);
2157	if (!xfs_bud_cache)
2158		goto out_destroy_cui_cache;
2159
2160	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2161			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2162			0, 0, NULL);
2163	if (!xfs_bui_cache)
2164		goto out_destroy_bud_cache;
2165
2166	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2167					    sizeof(struct xfs_attrd_log_item),
2168					    0, 0, NULL);
2169	if (!xfs_attrd_cache)
2170		goto out_destroy_bui_cache;
2171
2172	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2173					    sizeof(struct xfs_attri_log_item),
2174					    0, 0, NULL);
2175	if (!xfs_attri_cache)
2176		goto out_destroy_attrd_cache;
2177
2178	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2179					     sizeof(struct xfs_iunlink_item),
2180					     0, 0, NULL);
2181	if (!xfs_iunlink_cache)
2182		goto out_destroy_attri_cache;
2183
2184	return 0;
2185
2186 out_destroy_attri_cache:
2187	kmem_cache_destroy(xfs_attri_cache);
2188 out_destroy_attrd_cache:
2189	kmem_cache_destroy(xfs_attrd_cache);
2190 out_destroy_bui_cache:
2191	kmem_cache_destroy(xfs_bui_cache);
2192 out_destroy_bud_cache:
2193	kmem_cache_destroy(xfs_bud_cache);
2194 out_destroy_cui_cache:
2195	kmem_cache_destroy(xfs_cui_cache);
2196 out_destroy_cud_cache:
2197	kmem_cache_destroy(xfs_cud_cache);
2198 out_destroy_rui_cache:
2199	kmem_cache_destroy(xfs_rui_cache);
2200 out_destroy_rud_cache:
2201	kmem_cache_destroy(xfs_rud_cache);
2202 out_destroy_icreate_cache:
2203	kmem_cache_destroy(xfs_icreate_cache);
2204 out_destroy_ili_cache:
2205	kmem_cache_destroy(xfs_ili_cache);
2206 out_destroy_inode_cache:
2207	kmem_cache_destroy(xfs_inode_cache);
2208 out_destroy_efi_cache:
2209	kmem_cache_destroy(xfs_efi_cache);
2210 out_destroy_efd_cache:
2211	kmem_cache_destroy(xfs_efd_cache);
2212 out_destroy_buf_item_cache:
2213	kmem_cache_destroy(xfs_buf_item_cache);
2214 out_destroy_trans_cache:
2215	kmem_cache_destroy(xfs_trans_cache);
2216 out_destroy_ifork_cache:
2217	kmem_cache_destroy(xfs_ifork_cache);
2218 out_destroy_da_state_cache:
2219	kmem_cache_destroy(xfs_da_state_cache);
2220 out_destroy_defer_item_cache:
2221	xfs_defer_destroy_item_caches();
2222 out_destroy_btree_cur_cache:
2223	xfs_btree_destroy_cur_caches();
2224 out_destroy_log_ticket_cache:
2225	kmem_cache_destroy(xfs_log_ticket_cache);
2226 out_destroy_buf_cache:
2227	kmem_cache_destroy(xfs_buf_cache);
2228 out:
2229	return -ENOMEM;
2230}
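
/*
 * The *_MAX_FAST_EXTENTS sizings above pre-size each intent/done item
 * cache for the common small case; log items carrying more extents than
 * that fall back to plain heap allocations at run time.
 */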
2231
2232STATIC void
2233xfs_destroy_caches(void)
2234{
2235	/*
2236	 * Make sure all delayed RCU frees are flushed before we
2237	 * destroy caches.
2238	 */
2239	rcu_barrier();
2240	kmem_cache_destroy(xfs_iunlink_cache);
2241	kmem_cache_destroy(xfs_attri_cache);
2242	kmem_cache_destroy(xfs_attrd_cache);
2243	kmem_cache_destroy(xfs_bui_cache);
2244	kmem_cache_destroy(xfs_bud_cache);
2245	kmem_cache_destroy(xfs_cui_cache);
2246	kmem_cache_destroy(xfs_cud_cache);
2247	kmem_cache_destroy(xfs_rui_cache);
2248	kmem_cache_destroy(xfs_rud_cache);
2249	kmem_cache_destroy(xfs_icreate_cache);
2250	kmem_cache_destroy(xfs_ili_cache);
2251	kmem_cache_destroy(xfs_inode_cache);
2252	kmem_cache_destroy(xfs_efi_cache);
2253	kmem_cache_destroy(xfs_efd_cache);
2254	kmem_cache_destroy(xfs_buf_item_cache);
2255	kmem_cache_destroy(xfs_trans_cache);
2256	kmem_cache_destroy(xfs_ifork_cache);
2257	kmem_cache_destroy(xfs_da_state_cache);
2258	xfs_defer_destroy_item_caches();
2259	xfs_btree_destroy_cur_caches();
2260	kmem_cache_destroy(xfs_log_ticket_cache);
2261	kmem_cache_destroy(xfs_buf_cache);
2262}
2263
2264STATIC int __init
2265xfs_init_workqueues(void)
2266{
2267	/*
2268	 * The allocation workqueue can be used in memory reclaim situations
2269	 * (writepage path), and parallelism is only limited by the number of
2270	 * AGs in all the filesystems mounted. Hence use the default large
2271	 * max_active value for this workqueue.
2272	 */
2273	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2274			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2275	if (!xfs_alloc_wq)
2276		return -ENOMEM;
2277
2278	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2279			0);
2280	if (!xfs_discard_wq)
2281		goto out_free_alloc_wq;
2282
2283	return 0;
2284out_free_alloc_wq:
2285	destroy_workqueue(xfs_alloc_wq);
2286	return -ENOMEM;
2287}
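
/*
 * WQ_MEM_RECLAIM above guarantees "xfsalloc" a rescuer thread, so
 * allocation work can make forward progress even when the system is too
 * short of memory to spawn new worker threads.
 */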
2288
2289STATIC void
2290xfs_destroy_workqueues(void)
2291{
2292	destroy_workqueue(xfs_discard_wq);
2293	destroy_workqueue(xfs_alloc_wq);
2294}
2295
2296STATIC int __init
2297init_xfs_fs(void)
2298{
2299	int			error;
2300
2301	xfs_check_ondisk_structs();
2302
2303	error = xfs_dahash_test();
2304	if (error)
2305		return error;
2306
2307	printk(KERN_INFO XFS_VERSION_STRING " with "
2308			 XFS_BUILD_OPTIONS " enabled\n");
2309
2310	xfs_dir_startup();
2311
2312	error = xfs_init_caches();
2313	if (error)
2314		goto out;
2315
2316	error = xfs_init_workqueues();
2317	if (error)
2318		goto out_destroy_caches;
2319
2320	error = xfs_mru_cache_init();
2321	if (error)
2322		goto out_destroy_wq;
2323
2324	error = xfs_init_procfs();
2325	if (error)
2326		goto out_mru_cache_uninit;
2327
2328	error = xfs_sysctl_register();
2329	if (error)
2330		goto out_cleanup_procfs;
2331
2332	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2333
2334	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2335	if (!xfs_kset) {
2336		error = -ENOMEM;
2337		goto out_debugfs_unregister;
2338	}
2339
2340	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2341
2342	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2343	if (!xfsstats.xs_stats) {
2344		error = -ENOMEM;
2345		goto out_kset_unregister;
2346	}
2347
2348	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2349			       "stats");
2350	if (error)
2351		goto out_free_stats;
2352
2353	error = xchk_global_stats_setup(xfs_debugfs);
2354	if (error)
2355		goto out_remove_stats_kobj;
2356
2357#ifdef DEBUG
2358	xfs_dbg_kobj.kobject.kset = xfs_kset;
2359	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2360	if (error)
2361		goto out_remove_scrub_stats;
2362#endif
2363
2364	error = xfs_qm_init();
2365	if (error)
2366		goto out_remove_dbg_kobj;
2367
2368	error = register_filesystem(&xfs_fs_type);
2369	if (error)
2370		goto out_qm_exit;
2371	return 0;
2372
2373 out_qm_exit:
2374	xfs_qm_exit();
2375 out_remove_dbg_kobj:
2376#ifdef DEBUG
2377	xfs_sysfs_del(&xfs_dbg_kobj);
2378 out_remove_scrub_stats:
2379#endif
2380	xchk_global_stats_teardown();
2381 out_remove_stats_kobj:
2382	xfs_sysfs_del(&xfsstats.xs_kobj);
2383 out_free_stats:
2384	free_percpu(xfsstats.xs_stats);
2385 out_kset_unregister:
2386	kset_unregister(xfs_kset);
2387 out_debugfs_unregister:
2388	debugfs_remove(xfs_debugfs);
2389	xfs_sysctl_unregister();
2390 out_cleanup_procfs:
2391	xfs_cleanup_procfs();
2392 out_mru_cache_uninit:
2393	xfs_mru_cache_uninit();
2394 out_destroy_wq:
2395	xfs_destroy_workqueues();
2396 out_destroy_caches:
2397	xfs_destroy_caches();
2398 out:
2399	return error;
2400}
2401
2402STATIC void __exit
2403exit_xfs_fs(void)
2404{
2405	xfs_qm_exit();
2406	unregister_filesystem(&xfs_fs_type);
2407#ifdef DEBUG
2408	xfs_sysfs_del(&xfs_dbg_kobj);
2409#endif
2410	xchk_global_stats_teardown();
2411	xfs_sysfs_del(&xfsstats.xs_kobj);
2412	free_percpu(xfsstats.xs_stats);
2413	kset_unregister(xfs_kset);
2414	debugfs_remove(xfs_debugfs);
2415	xfs_sysctl_unregister();
2416	xfs_cleanup_procfs();
2417	xfs_mru_cache_uninit();
2418	xfs_destroy_workqueues();
2419	xfs_destroy_caches();
2420	xfs_uuid_table_free();
2421}
2422
2423module_init(init_xfs_fs);
2424module_exit(exit_xfs_fs);
2425
2426MODULE_AUTHOR("Silicon Graphics, Inc.");
2427MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2428MODULE_LICENSE("GPL");