   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_sb.h"
  13#include "xfs_mount.h"
  14#include "xfs_inode.h"
  15#include "xfs_btree.h"
  16#include "xfs_bmap.h"
  17#include "xfs_alloc.h"
  18#include "xfs_fsops.h"
  19#include "xfs_trans.h"
  20#include "xfs_buf_item.h"
  21#include "xfs_log.h"
  22#include "xfs_log_priv.h"
  23#include "xfs_dir2.h"
  24#include "xfs_extfree_item.h"
  25#include "xfs_mru_cache.h"
  26#include "xfs_inode_item.h"
  27#include "xfs_icache.h"
  28#include "xfs_trace.h"
  29#include "xfs_icreate_item.h"
  30#include "xfs_filestream.h"
  31#include "xfs_quota.h"
  32#include "xfs_sysfs.h"
  33#include "xfs_ondisk.h"
  34#include "xfs_rmap_item.h"
  35#include "xfs_refcount_item.h"
  36#include "xfs_bmap_item.h"
  37#include "xfs_reflink.h"
  38
  39#include <linux/magic.h>
  40#include <linux/parser.h>
  41
  42static const struct super_operations xfs_super_operations;
  43struct bio_set xfs_ioend_bioset;
  44
  45static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
  46#ifdef DEBUG
  47static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
  48#endif
  49
  50/*
  51 * Table driven mount option parser.
  52 */
  53enum {
  54	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
  55	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
  56	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
  57	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
  58	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
  59	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
  60	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
  61	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
  62	Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
  63};
  64
  65static const match_table_t tokens = {
  66	{Opt_logbufs,	"logbufs=%u"},	/* number of XFS log buffers */
  67	{Opt_logbsize,	"logbsize=%s"},	/* size of XFS log buffers */
  68	{Opt_logdev,	"logdev=%s"},	/* log device */
  69	{Opt_rtdev,	"rtdev=%s"},	/* realtime I/O device */
  70	{Opt_biosize,	"biosize=%u"},	/* log2 of preferred buffered io size */
  71	{Opt_wsync,	"wsync"},	/* safe-mode nfs compatible mount */
  72	{Opt_noalign,	"noalign"},	/* turn off stripe alignment */
  73	{Opt_swalloc,	"swalloc"},	/* turn on stripe width allocation */
  74	{Opt_sunit,	"sunit=%u"},	/* data volume stripe unit */
  75	{Opt_swidth,	"swidth=%u"},	/* data volume stripe width */
  76	{Opt_nouuid,	"nouuid"},	/* ignore filesystem UUID */
  77	{Opt_grpid,	"grpid"},	/* group-ID from parent directory */
  78	{Opt_nogrpid,	"nogrpid"},	/* group-ID from current process */
  79	{Opt_bsdgroups,	"bsdgroups"},	/* group-ID from parent directory */
  80	{Opt_sysvgroups,"sysvgroups"},	/* group-ID from current process */
  81	{Opt_allocsize,	"allocsize=%s"},/* preferred allocation size */
  82	{Opt_norecovery,"norecovery"},	/* don't run XFS recovery */
  83	{Opt_inode64,	"inode64"},	/* inodes can be allocated anywhere */
  84	{Opt_inode32,   "inode32"},	/* inode allocation limited to
  85					 * XFS_MAXINUMBER_32 */
  86	{Opt_ikeep,	"ikeep"},	/* do not free empty inode clusters */
  87	{Opt_noikeep,	"noikeep"},	/* free empty inode clusters */
  88	{Opt_largeio,	"largeio"},	/* report large I/O sizes in stat() */
  89	{Opt_nolargeio,	"nolargeio"},	/* do not report large I/O sizes
  90					 * in stat(). */
  91	{Opt_attr2,	"attr2"},	/* do use attr2 attribute format */
  92	{Opt_noattr2,	"noattr2"},	/* do not use attr2 attribute format */
  93	{Opt_filestreams,"filestreams"},/* use filestreams allocator */
  94	{Opt_quota,	"quota"},	/* disk quotas (user) */
  95	{Opt_noquota,	"noquota"},	/* no quotas */
  96	{Opt_usrquota,	"usrquota"},	/* user quota enabled */
  97	{Opt_grpquota,	"grpquota"},	/* group quota enabled */
  98	{Opt_prjquota,	"prjquota"},	/* project quota enabled */
  99	{Opt_uquota,	"uquota"},	/* user quota (IRIX variant) */
 100	{Opt_gquota,	"gquota"},	/* group quota (IRIX variant) */
 101	{Opt_pquota,	"pquota"},	/* project quota (IRIX variant) */
 102	{Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
 103	{Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
 104	{Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
 105	{Opt_qnoenforce, "qnoenforce"},	/* same as uqnoenforce */
 106	{Opt_discard,	"discard"},	/* Discard unused blocks */
 107	{Opt_nodiscard,	"nodiscard"},	/* Do not discard unused blocks */
 108	{Opt_dax,	"dax"},		/* Enable direct access to bdev pages */
 109	{Opt_err,	NULL},
 110};
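/*
 * Example (illustrative): a mount string such as
 * "logbufs=8,logbsize=64k,noalign,logdev=/dev/sdb1" is split on commas by
 * strsep() in xfs_parseargs() below; match_token() then matches each piece
 * against this table, returning Opt_logbufs, Opt_logbsize, Opt_noalign and
 * Opt_logdev in turn, with the %u/%s arguments captured into args[].
 */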
 111
 112
 113STATIC int
 114suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
 115{
 116	int	last, shift_left_factor = 0, _res;
 117	char	*value;
 118	int	ret = 0;
 119
 120	value = match_strdup(s);
 121	if (!value)
 122		return -ENOMEM;
 123
 124	last = strlen(value) - 1;
 125	if (value[last] == 'K' || value[last] == 'k') {
 126		shift_left_factor = 10;
 127		value[last] = '\0';
 128	}
 129	if (value[last] == 'M' || value[last] == 'm') {
 130		shift_left_factor = 20;
 131		value[last] = '\0';
 132	}
 133	if (value[last] == 'G' || value[last] == 'g') {
 134		shift_left_factor = 30;
 135		value[last] = '\0';
 136	}
 137
 138	if (kstrtoint(value, base, &_res))
 139		ret = -EINVAL;
 140	kfree(value);
 141	*res = _res << shift_left_factor;
 142	return ret;
 143}
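/*
 * Example (illustrative): for "logbsize=64k" the matched substring is "64k".
 * The trailing 'k' selects a shift of 10 and is stripped, kstrtoint() parses
 * the remaining "64", and the result is 64 << 10 == 65536.  "32m" would
 * likewise yield 32 << 20.
 */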
 144
 145/*
 146 * This function fills in xfs_mount_t fields based on mount args.
 147 * Note: the superblock has _not_ yet been read in.
 148 *
 149 * Note that this function leaks the various device name allocations on
 150 * failure.  The caller takes care of them.
 151 *
 152 * *sb is const because this is also used to test options on the remount
 153 * path, and we don't want this to have any side effects at remount time.
 154 * Today this function does not change *sb, but just to future-proof...
 155 */
 156STATIC int
 157xfs_parseargs(
 158	struct xfs_mount	*mp,
 159	char			*options)
 160{
 161	const struct super_block *sb = mp->m_super;
 162	char			*p;
 163	substring_t		args[MAX_OPT_ARGS];
 164	int			dsunit = 0;
 165	int			dswidth = 0;
 166	int			iosize = 0;
 167	uint8_t			iosizelog = 0;
 168
 169	/*
 170	 * set up the mount name first so all the errors will refer to the
 171	 * correct device.
 172	 */
 173	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
 174	if (!mp->m_fsname)
 175		return -ENOMEM;
 176	mp->m_fsname_len = strlen(mp->m_fsname) + 1;
 177
 178	/*
 179	 * Copy binary VFS mount flags we are interested in.
 180	 */
 181	if (sb_rdonly(sb))
 182		mp->m_flags |= XFS_MOUNT_RDONLY;
 183	if (sb->s_flags & SB_DIRSYNC)
 184		mp->m_flags |= XFS_MOUNT_DIRSYNC;
 185	if (sb->s_flags & SB_SYNCHRONOUS)
 186		mp->m_flags |= XFS_MOUNT_WSYNC;
 187
 188	/*
 189	 * Set some default flags that could be cleared by the mount option
 190	 * parsing.
 191	 */
 192	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
 193
 194	/*
 195	 * These can be overridden by the mount option parsing.
 196	 */
 197	mp->m_logbufs = -1;
 198	mp->m_logbsize = -1;
 199
 200	if (!options)
 201		goto done;
 202
 203	while ((p = strsep(&options, ",")) != NULL) {
 204		int		token;
 205
 206		if (!*p)
 207			continue;
 208
 209		token = match_token(p, tokens, args);
 210		switch (token) {
 211		case Opt_logbufs:
 212			if (match_int(args, &mp->m_logbufs))
 213				return -EINVAL;
 214			break;
 215		case Opt_logbsize:
 216			if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
 217				return -EINVAL;
 218			break;
 219		case Opt_logdev:
 220			kfree(mp->m_logname);
 221			mp->m_logname = match_strdup(args);
 222			if (!mp->m_logname)
 223				return -ENOMEM;
 224			break;
 225		case Opt_rtdev:
 226			kfree(mp->m_rtname);
 227			mp->m_rtname = match_strdup(args);
 228			if (!mp->m_rtname)
 229				return -ENOMEM;
 230			break;
 231		case Opt_allocsize:
 232		case Opt_biosize:
 233			if (suffix_kstrtoint(args, 10, &iosize))
 234				return -EINVAL;
 235			iosizelog = ffs(iosize) - 1;
 236			break;
 237		case Opt_grpid:
 238		case Opt_bsdgroups:
 239			mp->m_flags |= XFS_MOUNT_GRPID;
 240			break;
 241		case Opt_nogrpid:
 242		case Opt_sysvgroups:
 243			mp->m_flags &= ~XFS_MOUNT_GRPID;
 244			break;
 245		case Opt_wsync:
 246			mp->m_flags |= XFS_MOUNT_WSYNC;
 247			break;
 248		case Opt_norecovery:
 249			mp->m_flags |= XFS_MOUNT_NORECOVERY;
 250			break;
 251		case Opt_noalign:
 252			mp->m_flags |= XFS_MOUNT_NOALIGN;
 253			break;
 254		case Opt_swalloc:
 255			mp->m_flags |= XFS_MOUNT_SWALLOC;
 256			break;
 257		case Opt_sunit:
 258			if (match_int(args, &dsunit))
 259				return -EINVAL;
 260			break;
 261		case Opt_swidth:
 262			if (match_int(args, &dswidth))
 263				return -EINVAL;
 264			break;
 265		case Opt_inode32:
 266			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
 267			break;
 268		case Opt_inode64:
 269			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
 270			break;
 271		case Opt_nouuid:
 272			mp->m_flags |= XFS_MOUNT_NOUUID;
 273			break;
 274		case Opt_ikeep:
 275			mp->m_flags |= XFS_MOUNT_IKEEP;
 276			break;
 277		case Opt_noikeep:
 278			mp->m_flags &= ~XFS_MOUNT_IKEEP;
 279			break;
 280		case Opt_largeio:
 281			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
 282			break;
 283		case Opt_nolargeio:
 284			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
 285			break;
 286		case Opt_attr2:
 287			mp->m_flags |= XFS_MOUNT_ATTR2;
 288			break;
 289		case Opt_noattr2:
 290			mp->m_flags &= ~XFS_MOUNT_ATTR2;
 291			mp->m_flags |= XFS_MOUNT_NOATTR2;
 292			break;
 293		case Opt_filestreams:
 294			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
 295			break;
 296		case Opt_noquota:
 297			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
 298			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
 299			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
 300			break;
 301		case Opt_quota:
 302		case Opt_uquota:
 303		case Opt_usrquota:
 304			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
 305					 XFS_UQUOTA_ENFD);
 306			break;
 307		case Opt_qnoenforce:
 308		case Opt_uqnoenforce:
 309			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
 310			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
 311			break;
 312		case Opt_pquota:
 313		case Opt_prjquota:
 314			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
 315					 XFS_PQUOTA_ENFD);
 316			break;
 317		case Opt_pqnoenforce:
 318			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
 319			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
 320			break;
 321		case Opt_gquota:
 322		case Opt_grpquota:
 323			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
 324					 XFS_GQUOTA_ENFD);
 325			break;
 326		case Opt_gqnoenforce:
 327			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
 328			mp->m_qflags &= ~XFS_GQUOTA_ENFD;
 329			break;
 330		case Opt_discard:
 331			mp->m_flags |= XFS_MOUNT_DISCARD;
 332			break;
 333		case Opt_nodiscard:
 334			mp->m_flags &= ~XFS_MOUNT_DISCARD;
 335			break;
 336#ifdef CONFIG_FS_DAX
 337		case Opt_dax:
 338			mp->m_flags |= XFS_MOUNT_DAX;
 339			break;
 340#endif
 341		default:
 342			xfs_warn(mp, "unknown mount option [%s].", p);
 343			return -EINVAL;
 344		}
 345	}
 346
 347	/*
 348	 * no recovery flag requires a read-only mount
 349	 */
 350	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
 351	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
 352		xfs_warn(mp, "no-recovery mounts must be read-only.");
 353		return -EINVAL;
 354	}
 355
 356	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
 357		xfs_warn(mp,
 358	"sunit and swidth options incompatible with the noalign option");
 359		return -EINVAL;
 360	}
 361
 362#ifndef CONFIG_XFS_QUOTA
 363	if (XFS_IS_QUOTA_RUNNING(mp)) {
 364		xfs_warn(mp, "quota support not available in this kernel.");
 365		return -EINVAL;
 366	}
 367#endif
 368
 369	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
 370		xfs_warn(mp, "sunit and swidth must be specified together");
 371		return -EINVAL;
 372	}
 373
 374	if (dsunit && (dswidth % dsunit != 0)) {
 375		xfs_warn(mp,
 376	"stripe width (%d) must be a multiple of the stripe unit (%d)",
 377			dswidth, dsunit);
 378		return -EINVAL;
 379	}
 380
 381done:
 382	if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
 383		/*
 384		 * At this point the superblock has not been read
 385		 * in, therefore we do not know the block size.
 386		 * Before the mount call ends we will convert
 387		 * these to FSBs.
 388		 */
 389		mp->m_dalign = dsunit;
 390		mp->m_swidth = dswidth;
 391	}
 392
 393	if (mp->m_logbufs != -1 &&
 394	    mp->m_logbufs != 0 &&
 395	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
 396	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
 397		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
 398			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
 399		return -EINVAL;
 400	}
 401	if (mp->m_logbsize != -1 &&
 402	    mp->m_logbsize !=  0 &&
 403	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
 404	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
 405	     !is_power_of_2(mp->m_logbsize))) {
 406		xfs_warn(mp,
 407			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
 408			mp->m_logbsize);
 409		return -EINVAL;
 410	}
 411
 412	if (iosizelog) {
 413		if (iosizelog > XFS_MAX_IO_LOG ||
 414		    iosizelog < XFS_MIN_IO_LOG) {
 415			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
 416				iosizelog, XFS_MIN_IO_LOG,
 417				XFS_MAX_IO_LOG);
 418			return -EINVAL;
 419		}
 420
 421		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
 422		mp->m_readio_log = iosizelog;
 423		mp->m_writeio_log = iosizelog;
 424	}
 425
 426	return 0;
 427}
 428
 429struct proc_xfs_info {
 430	uint64_t	flag;
 431	char		*str;
 432};
 433
 434STATIC void
 435xfs_showargs(
 436	struct xfs_mount	*mp,
 437	struct seq_file		*m)
 438{
 439	static struct proc_xfs_info xfs_info_set[] = {
 440		/* the few simple ones we can get from the mount struct */
 441		{ XFS_MOUNT_IKEEP,		",ikeep" },
 442		{ XFS_MOUNT_WSYNC,		",wsync" },
 443		{ XFS_MOUNT_NOALIGN,		",noalign" },
 444		{ XFS_MOUNT_SWALLOC,		",swalloc" },
 445		{ XFS_MOUNT_NOUUID,		",nouuid" },
 446		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
 447		{ XFS_MOUNT_ATTR2,		",attr2" },
 448		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
 449		{ XFS_MOUNT_GRPID,		",grpid" },
 450		{ XFS_MOUNT_DISCARD,		",discard" },
 451		{ XFS_MOUNT_SMALL_INUMS,	",inode32" },
 452		{ XFS_MOUNT_DAX,		",dax" },
 453		{ 0, NULL }
 454	};
 455	static struct proc_xfs_info xfs_info_unset[] = {
 456		/* the few simple ones we can get from the mount struct */
 457		{ XFS_MOUNT_COMPAT_IOSIZE,	",largeio" },
 458		{ XFS_MOUNT_SMALL_INUMS,	",inode64" },
 459		{ 0, NULL }
 460	};
 461	struct proc_xfs_info	*xfs_infop;
 462
 463	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 464		if (mp->m_flags & xfs_infop->flag)
 465			seq_puts(m, xfs_infop->str);
 466	}
 467	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
 468		if (!(mp->m_flags & xfs_infop->flag))
 469			seq_puts(m, xfs_infop->str);
 470	}
 471
 472	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
 473		seq_printf(m, ",allocsize=%dk",
 474				(int)(1 << mp->m_writeio_log) >> 10);
 475
 476	if (mp->m_logbufs > 0)
 477		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
 478	if (mp->m_logbsize > 0)
 479		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
 480
 481	if (mp->m_logname)
 482		seq_show_option(m, "logdev", mp->m_logname);
 483	if (mp->m_rtname)
 484		seq_show_option(m, "rtdev", mp->m_rtname);
 485
 486	if (mp->m_dalign > 0)
 487		seq_printf(m, ",sunit=%d",
 488				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 489	if (mp->m_swidth > 0)
 490		seq_printf(m, ",swidth=%d",
 491				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 492
 493	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
 494		seq_puts(m, ",usrquota");
 495	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
 496		seq_puts(m, ",uqnoenforce");
 497
 498	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
 499		if (mp->m_qflags & XFS_PQUOTA_ENFD)
 500			seq_puts(m, ",prjquota");
 501		else
 502			seq_puts(m, ",pqnoenforce");
 503	}
 504	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
 505		if (mp->m_qflags & XFS_GQUOTA_ENFD)
 506			seq_puts(m, ",grpquota");
 507		else
 508			seq_puts(m, ",gqnoenforce");
 509	}
 510
 511	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 512		seq_puts(m, ",noquota");
 513}
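/*
 * Example (illustrative): a filesystem mounted with stripe alignment
 * disabled and enforcing user quotas would have ",noalign,usrquota"
 * appended to its /proc/mounts line by the code above; logbufs/logbsize
 * are only shown when they were explicitly set (i.e. are > 0).
 */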
 514
 515static uint64_t
 516xfs_max_file_offset(
 517	unsigned int		blockshift)
 518{
 519	unsigned int		pagefactor = 1;
 520	unsigned int		bitshift = BITS_PER_LONG - 1;
 521
 522	/* Figure out maximum filesize, on Linux this can depend on
 523	 * the filesystem blocksize (on 32 bit platforms).
 524	 * __block_write_begin does this in an [unsigned] long long...
 525	 *      page->index << (PAGE_SHIFT - bbits)
 526	 * So, for page sized blocks (4K on 32 bit platforms),
 527	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
 528	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 529	 * but for smaller blocksizes it is less (bbits = log2 bsize).
 530	 */
 531
 532#if BITS_PER_LONG == 32
 533	ASSERT(sizeof(sector_t) == 8);
 534	pagefactor = PAGE_SIZE;
 535	bitshift = BITS_PER_LONG;
 536#endif
 537
 538	return (((uint64_t)pagefactor) << bitshift) - 1;
 539}
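/*
 * Worked example (illustrative): on a 64-bit kernel pagefactor stays 1 and
 * bitshift is 63, so the returned limit is 2^63 - 1.  On a 32-bit kernel
 * with 4 KiB pages, pagefactor = 4096 and bitshift = 32, giving
 * ((uint64_t)4096 << 32) - 1, i.e. just under 16 TiB.
 */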
 540
 541/*
 542 * Set parameters for inode allocation heuristics, taking into account
 543 * filesystem size and inode32/inode64 mount options; i.e. specifically
 544 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 545 *
 546 * Inode allocation patterns are altered only if inode32 is requested
 547 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 548 * If altered, XFS_MOUNT_32BITINODES is set as well.
 549 *
 550 * An agcount independent of that in the mount structure is provided
 551 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 552 * to the potentially higher ag count.
 553 *
 554 * Returns the maximum AG index which may contain inodes.
 555 */
 556xfs_agnumber_t
 557xfs_set_inode_alloc(
 558	struct xfs_mount *mp,
 559	xfs_agnumber_t	agcount)
 560{
 561	xfs_agnumber_t	index;
 562	xfs_agnumber_t	maxagi = 0;
 563	xfs_sb_t	*sbp = &mp->m_sb;
 564	xfs_agnumber_t	max_metadata;
 565	xfs_agino_t	agino;
 566	xfs_ino_t	ino;
 567
 568	/*
 569	 * Calculate how much should be reserved for inodes to meet
 570	 * the max inode percentage.  Used only for inode32.
 571	 */
 572	if (M_IGEO(mp)->maxicount) {
 573		uint64_t	icount;
 574
 575		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 576		do_div(icount, 100);
 577		icount += sbp->sb_agblocks - 1;
 578		do_div(icount, sbp->sb_agblocks);
 579		max_metadata = icount;
 580	} else {
 581		max_metadata = agcount;
 582	}
 583
 584	/* Get the last possible inode in the filesystem */
 585	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
 586	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 587
 588	/*
 589	 * If user asked for no more than 32-bit inodes, and the fs is
 590	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
 591	 * the allocator to accommodate the request.
 592	 */
 593	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
 594		mp->m_flags |= XFS_MOUNT_32BITINODES;
 595	else
 596		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 597
 598	for (index = 0; index < agcount; index++) {
 599		struct xfs_perag	*pag;
 600
 601		ino = XFS_AGINO_TO_INO(mp, index, agino);
 602
 603		pag = xfs_perag_get(mp, index);
 604
 605		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
 606			if (ino > XFS_MAXINUMBER_32) {
 607				pag->pagi_inodeok = 0;
 608				pag->pagf_metadata = 0;
 609			} else {
 610				pag->pagi_inodeok = 1;
 611				maxagi++;
 612				if (index < max_metadata)
 613					pag->pagf_metadata = 1;
 614				else
 615					pag->pagf_metadata = 0;
 616			}
 617		} else {
 618			pag->pagi_inodeok = 1;
 619			pag->pagf_metadata = 0;
 620		}
 621
 622		xfs_perag_put(pag);
 623	}
 624
 625	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
 626}
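/*
 * Example (illustrative): with inode32 (XFS_MOUNT_SMALL_INUMS) on a
 * filesystem large enough that its last possible inode number exceeds
 * XFS_MAXINUMBER_32, only the low AGs whose inode numbers still fit in
 * 32 bits keep pagi_inodeok set; the returned maxagi is the count of
 * those AGs, and new inodes are allocated only from them.
 */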
 627
 628STATIC int
 629xfs_blkdev_get(
 630	xfs_mount_t		*mp,
 631	const char		*name,
 632	struct block_device	**bdevp)
 633{
 634	int			error = 0;
 635
 636	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 637				    mp);
 638	if (IS_ERR(*bdevp)) {
 639		error = PTR_ERR(*bdevp);
 640		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 641	}
 642
 643	return error;
 644}
 645
 646STATIC void
 647xfs_blkdev_put(
 648	struct block_device	*bdev)
 649{
 650	if (bdev)
 651		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 652}
 653
 654void
 655xfs_blkdev_issue_flush(
 656	xfs_buftarg_t		*buftarg)
 657{
 658	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
 659}
 660
 661STATIC void
 662xfs_close_devices(
 663	struct xfs_mount	*mp)
 664{
 665	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
 666
 667	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 668		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
 669		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
 670
 671		xfs_free_buftarg(mp->m_logdev_targp);
 672		xfs_blkdev_put(logdev);
 673		fs_put_dax(dax_logdev);
 674	}
 675	if (mp->m_rtdev_targp) {
 676		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
 677		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
 678
 679		xfs_free_buftarg(mp->m_rtdev_targp);
 680		xfs_blkdev_put(rtdev);
 681		fs_put_dax(dax_rtdev);
 682	}
 683	xfs_free_buftarg(mp->m_ddev_targp);
 684	fs_put_dax(dax_ddev);
 685}
 686
 687/*
 688 * The file system configurations are:
 689 *	(1) device (partition) with data and internal log
 690 *	(2) logical volume with data and log subvolumes.
 691 *	(3) logical volume with data, log, and realtime subvolumes.
 692 *
 693 * We only have to handle opening the log and realtime volumes here if
 694 * they are present.  The data subvolume has already been opened by
 695 * get_sb_bdev() and is stored in sb->s_bdev.
 696 */
 697STATIC int
 698xfs_open_devices(
 699	struct xfs_mount	*mp)
 700{
 701	struct block_device	*ddev = mp->m_super->s_bdev;
 702	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
 703	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
 704	struct block_device	*logdev = NULL, *rtdev = NULL;
 705	int			error;
 706
 707	/*
 708	 * Open real time and log devices - order is important.
 709	 */
 710	if (mp->m_logname) {
 711		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
 712		if (error)
 713			goto out;
 714		dax_logdev = fs_dax_get_by_bdev(logdev);
 715	}
 716
 717	if (mp->m_rtname) {
 718		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
 719		if (error)
 720			goto out_close_logdev;
 721
 722		if (rtdev == ddev || rtdev == logdev) {
 723			xfs_warn(mp,
 724	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 725			error = -EINVAL;
 726			goto out_close_rtdev;
 727		}
 728		dax_rtdev = fs_dax_get_by_bdev(rtdev);
 729	}
 730
 731	/*
 732	 * Setup xfs_mount buffer target pointers
 733	 */
 734	error = -ENOMEM;
 735	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
 736	if (!mp->m_ddev_targp)
 737		goto out_close_rtdev;
 738
 739	if (rtdev) {
 740		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
 741		if (!mp->m_rtdev_targp)
 742			goto out_free_ddev_targ;
 743	}
 744
 745	if (logdev && logdev != ddev) {
 746		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
 747		if (!mp->m_logdev_targp)
 748			goto out_free_rtdev_targ;
 749	} else {
 750		mp->m_logdev_targp = mp->m_ddev_targp;
 751	}
 752
 753	return 0;
 754
 755 out_free_rtdev_targ:
 756	if (mp->m_rtdev_targp)
 757		xfs_free_buftarg(mp->m_rtdev_targp);
 758 out_free_ddev_targ:
 759	xfs_free_buftarg(mp->m_ddev_targp);
 760 out_close_rtdev:
 761	xfs_blkdev_put(rtdev);
 762	fs_put_dax(dax_rtdev);
 763 out_close_logdev:
 764	if (logdev && logdev != ddev) {
 765		xfs_blkdev_put(logdev);
 766		fs_put_dax(dax_logdev);
 767	}
 768 out:
 769	fs_put_dax(dax_ddev);
 770	return error;
 771}
 772
 773/*
 774 * Setup xfs_mount buffer target pointers based on superblock
 775 */
 776STATIC int
 777xfs_setup_devices(
 778	struct xfs_mount	*mp)
 779{
 780	int			error;
 781
 782	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 783	if (error)
 784		return error;
 785
 786	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 787		unsigned int	log_sector_size = BBSIZE;
 788
 789		if (xfs_sb_version_hassector(&mp->m_sb))
 790			log_sector_size = mp->m_sb.sb_logsectsize;
 791		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 792					    log_sector_size);
 793		if (error)
 794			return error;
 795	}
 796	if (mp->m_rtdev_targp) {
 797		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 798					    mp->m_sb.sb_sectsize);
 799		if (error)
 800			return error;
 801	}
 802
 803	return 0;
 804}
 805
 806STATIC int
 807xfs_init_mount_workqueues(
 808	struct xfs_mount	*mp)
 809{
 810	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
 811			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
 812	if (!mp->m_buf_workqueue)
 813		goto out;
 814
 815	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
 816			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
 817	if (!mp->m_unwritten_workqueue)
 818		goto out_destroy_buf;
 819
 820	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
 821			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
 822			0, mp->m_fsname);
 823	if (!mp->m_cil_workqueue)
 824		goto out_destroy_unwritten;
 825
 826	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
 827			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
 828	if (!mp->m_reclaim_workqueue)
 829		goto out_destroy_cil;
 830
 831	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
 832			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
 833	if (!mp->m_eofblocks_workqueue)
 834		goto out_destroy_reclaim;
 835
 836	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
 837					       mp->m_fsname);
 838	if (!mp->m_sync_workqueue)
 839		goto out_destroy_eofb;
 840
 841	return 0;
 842
 843out_destroy_eofb:
 844	destroy_workqueue(mp->m_eofblocks_workqueue);
 845out_destroy_reclaim:
 846	destroy_workqueue(mp->m_reclaim_workqueue);
 847out_destroy_cil:
 848	destroy_workqueue(mp->m_cil_workqueue);
 849out_destroy_unwritten:
 850	destroy_workqueue(mp->m_unwritten_workqueue);
 851out_destroy_buf:
 852	destroy_workqueue(mp->m_buf_workqueue);
 853out:
 854	return -ENOMEM;
 855}
 856
 857STATIC void
 858xfs_destroy_mount_workqueues(
 859	struct xfs_mount	*mp)
 860{
 861	destroy_workqueue(mp->m_sync_workqueue);
 862	destroy_workqueue(mp->m_eofblocks_workqueue);
 863	destroy_workqueue(mp->m_reclaim_workqueue);
 864	destroy_workqueue(mp->m_cil_workqueue);
 865	destroy_workqueue(mp->m_unwritten_workqueue);
 866	destroy_workqueue(mp->m_buf_workqueue);
 867}
 868
 869/*
 870 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 871 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 872 * for IO to complete so that we effectively throttle multiple callers to the
 873 * rate at which IO is completing.
 874 */
 875void
 876xfs_flush_inodes(
 877	struct xfs_mount	*mp)
 878{
 879	struct super_block	*sb = mp->m_super;
 880
 881	if (down_read_trylock(&sb->s_umount)) {
 882		sync_inodes_sb(sb);
 883		up_read(&sb->s_umount);
 884	}
 885}
 886
 887/* Catch misguided souls that try to use this interface on XFS */
 888STATIC struct inode *
 889xfs_fs_alloc_inode(
 890	struct super_block	*sb)
 891{
 892	BUG();
 893	return NULL;
 894}
 895
 896#ifdef DEBUG
 897static void
 898xfs_check_delalloc(
 899	struct xfs_inode	*ip,
 900	int			whichfork)
 901{
 902	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 903	struct xfs_bmbt_irec	got;
 904	struct xfs_iext_cursor	icur;
 905
 906	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
 907		return;
 908	do {
 909		if (isnullstartblock(got.br_startblock)) {
 910			xfs_warn(ip->i_mount,
 911	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
 912				ip->i_ino,
 913				whichfork == XFS_DATA_FORK ? "data" : "cow",
 914				got.br_startoff, got.br_blockcount);
 915		}
 916	} while (xfs_iext_next_extent(ifp, &icur, &got));
 917}
 918#else
 919#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
 920#endif
 921
 922/*
 923 * Now that the generic code is guaranteed not to be accessing
 924 * the linux inode, we can inactivate and reclaim the inode.
 925 */
 926STATIC void
 927xfs_fs_destroy_inode(
 928	struct inode		*inode)
 929{
 930	struct xfs_inode	*ip = XFS_I(inode);
 931
 932	trace_xfs_destroy_inode(ip);
 933
 934	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 935	XFS_STATS_INC(ip->i_mount, vn_rele);
 936	XFS_STATS_INC(ip->i_mount, vn_remove);
 937
 938	xfs_inactive(ip);
 939
 940	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
 941		xfs_check_delalloc(ip, XFS_DATA_FORK);
 942		xfs_check_delalloc(ip, XFS_COW_FORK);
 943		ASSERT(0);
 944	}
 945
 946	XFS_STATS_INC(ip->i_mount, vn_reclaim);
 947
 948	/*
 949	 * We should never get here with one of the reclaim flags already set.
 950	 */
 951	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
 952	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
 953
 954	/*
 955	 * We always use background reclaim here because even if the
 956	 * inode is clean, it still may be under IO and hence we have
 957	 * to take the flush lock. The background reclaim path handles
 958	 * this more efficiently than we can here, so simply let background
 959	 * reclaim tear down all inodes.
 960	 */
 961	xfs_inode_set_reclaim_tag(ip);
 962}
 963
 964static void
 965xfs_fs_dirty_inode(
 966	struct inode			*inode,
 967	int				flag)
 968{
 969	struct xfs_inode		*ip = XFS_I(inode);
 970	struct xfs_mount		*mp = ip->i_mount;
 971	struct xfs_trans		*tp;
 972
 973	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
 974		return;
 975	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
 976		return;
 977
 978	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
 979		return;
 980	xfs_ilock(ip, XFS_ILOCK_EXCL);
 981	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 982	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 983	xfs_trans_commit(tp);
 984}
 985
 986/*
 987 * Slab object creation initialisation for the XFS inode.
 988 * This covers only the idempotent fields in the XFS inode;
 989 * all other fields need to be initialised on allocation
 990 * from the slab. This avoids the need to repeatedly initialise
 991 * fields in the xfs inode that are left in the initialised state
 992 * when freeing the inode.
 993 */
 994STATIC void
 995xfs_fs_inode_init_once(
 996	void			*inode)
 997{
 998	struct xfs_inode	*ip = inode;
 999
1000	memset(ip, 0, sizeof(struct xfs_inode));
1001
1002	/* vfs inode */
1003	inode_init_once(VFS_I(ip));
1004
1005	/* xfs inode */
1006	atomic_set(&ip->i_pincount, 0);
1007	spin_lock_init(&ip->i_flags_lock);
1008
1009	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1010		     "xfsino", ip->i_ino);
1011	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1012		     "xfsino", ip->i_ino);
1013}
1014
1015/*
1016 * We do an unlocked check for XFS_IDONTCACHE here because we are already
1017 * serialised against cache hits here via the inode->i_lock and igrab() in
1018 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
1019 * racing with us, and it avoids needing to grab a spinlock here for every inode
1020 * we drop the final reference on.
1021 */
1022STATIC int
1023xfs_fs_drop_inode(
1024	struct inode		*inode)
1025{
1026	struct xfs_inode	*ip = XFS_I(inode);
1027
1028	/*
1029	 * If this unlinked inode is in the middle of recovery, don't
1030	 * drop the inode just yet; log recovery will take care of
1031	 * that.  See the comment for this inode flag.
1032	 */
1033	if (ip->i_flags & XFS_IRECOVERY) {
1034		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
1035		return 0;
1036	}
1037
1038	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
1039}
1040
1041STATIC void
1042xfs_free_fsname(
1043	struct xfs_mount	*mp)
1044{
1045	kfree(mp->m_fsname);
1046	kfree(mp->m_rtname);
1047	kfree(mp->m_logname);
1048}
1049
1050STATIC int
1051xfs_fs_sync_fs(
1052	struct super_block	*sb,
1053	int			wait)
1054{
1055	struct xfs_mount	*mp = XFS_M(sb);
1056
1057	/*
1058	 * Doing anything during the async pass would be counterproductive.
1059	 */
1060	if (!wait)
1061		return 0;
1062
1063	xfs_log_force(mp, XFS_LOG_SYNC);
1064	if (laptop_mode) {
1065		/*
1066		 * The disk must be active because we're syncing.
1067		 * We schedule log work now (now that the disk is
1068		 * active) instead of later (when it might not be).
1069		 */
1070		flush_delayed_work(&mp->m_log->l_work);
1071	}
1072
1073	return 0;
1074}
1075
1076STATIC int
1077xfs_fs_statfs(
1078	struct dentry		*dentry,
1079	struct kstatfs		*statp)
1080{
1081	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
1082	xfs_sb_t		*sbp = &mp->m_sb;
1083	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
1084	uint64_t		fakeinos, id;
1085	uint64_t		icount;
1086	uint64_t		ifree;
1087	uint64_t		fdblocks;
1088	xfs_extlen_t		lsize;
1089	int64_t			ffree;
1090
1091	statp->f_type = XFS_SUPER_MAGIC;
1092	statp->f_namelen = MAXNAMELEN - 1;
1093
1094	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1095	statp->f_fsid.val[0] = (u32)id;
1096	statp->f_fsid.val[1] = (u32)(id >> 32);
1097
1098	icount = percpu_counter_sum(&mp->m_icount);
1099	ifree = percpu_counter_sum(&mp->m_ifree);
1100	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
1101
1102	spin_lock(&mp->m_sb_lock);
1103	statp->f_bsize = sbp->sb_blocksize;
1104	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1105	statp->f_blocks = sbp->sb_dblocks - lsize;
1106	spin_unlock(&mp->m_sb_lock);
1107
1108	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
1109	statp->f_bavail = statp->f_bfree;
1110
1111	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
1112	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
1113	if (M_IGEO(mp)->maxicount)
1114		statp->f_files = min_t(typeof(statp->f_files),
1115					statp->f_files,
1116					M_IGEO(mp)->maxicount);
1117
1118	/* If sb_icount overshot maxicount, report actual allocation */
1119	statp->f_files = max_t(typeof(statp->f_files),
1120					statp->f_files,
1121					sbp->sb_icount);
1122
1123	/* make sure statp->f_ffree does not underflow */
1124	ffree = statp->f_files - (icount - ifree);
1125	statp->f_ffree = max_t(int64_t, ffree, 0);
1126
1127
1128	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1129	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
1130			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
1131		xfs_qm_statvfs(ip, statp);
1132
1133	if (XFS_IS_REALTIME_MOUNT(mp) &&
1134	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
1135		statp->f_blocks = sbp->sb_rblocks;
1136		statp->f_bavail = statp->f_bfree =
1137			sbp->sb_frextents * sbp->sb_rextsize;
1138	}
1139
1140	return 0;
1141}
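/*
 * Worked example (illustrative numbers): if icount = 1000 inodes are
 * allocated, ifree = 100 of them are free, and the remaining free space
 * could hold fakeinos = 1,000,000 more inodes, then f_files = 1,001,000
 * (subject to the maxicount/sb_icount clamps above) and
 * f_ffree = 1,001,000 - (1000 - 100) = 1,000,100.
 */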
1142
1143STATIC void
1144xfs_save_resvblks(struct xfs_mount *mp)
1145{
1146	uint64_t resblks = 0;
1147
1148	mp->m_resblks_save = mp->m_resblks;
1149	xfs_reserve_blocks(mp, &resblks, NULL);
1150}
1151
1152STATIC void
1153xfs_restore_resvblks(struct xfs_mount *mp)
1154{
1155	uint64_t resblks;
1156
1157	if (mp->m_resblks_save) {
1158		resblks = mp->m_resblks_save;
1159		mp->m_resblks_save = 0;
1160	} else
1161		resblks = xfs_default_resblks(mp);
1162
1163	xfs_reserve_blocks(mp, &resblks, NULL);
1164}
1165
1166/*
1167 * Trigger writeback of all the dirty metadata in the file system.
1168 *
1169 * This ensures that the metadata is written to their location on disk rather
1170 * than just existing in transactions in the log. This means after a quiesce
1171 * there is no log replay required to write the inodes to disk - this is the
1172 * primary difference between a sync and a quiesce.
1173 *
1174 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
1175 * it is started again when appropriate.
1176 */
1177void
1178xfs_quiesce_attr(
1179	struct xfs_mount	*mp)
1180{
1181	int	error = 0;
1182
1183	/* wait for all modifications to complete */
1184	while (atomic_read(&mp->m_active_trans) > 0)
1185		delay(100);
1186
1187	/* force the log to unpin objects from the now complete transactions */
1188	xfs_log_force(mp, XFS_LOG_SYNC);
1189
1190	/* reclaim inodes to do any IO before the freeze completes */
1191	xfs_reclaim_inodes(mp, 0);
1192	xfs_reclaim_inodes(mp, SYNC_WAIT);
1193
1194	/* Push the superblock and write an unmount record */
1195	error = xfs_log_sbcount(mp);
1196	if (error)
1197		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
1198				"Frozen image may not be consistent.");
1199	/*
1200	 * Just warn here till VFS can correctly support
1201	 * read-only remount without racing.
1202	 */
1203	WARN_ON(atomic_read(&mp->m_active_trans) != 0);
1204
1205	xfs_log_quiesce(mp);
1206}
1207
1208STATIC int
1209xfs_test_remount_options(
1210	struct super_block	*sb,
1211	char			*options)
1212{
1213	int			error = 0;
1214	struct xfs_mount	*tmp_mp;
1215
1216	tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
1217	if (!tmp_mp)
1218		return -ENOMEM;
1219
1220	tmp_mp->m_super = sb;
1221	error = xfs_parseargs(tmp_mp, options);
1222	xfs_free_fsname(tmp_mp);
1223	kmem_free(tmp_mp);
1224
1225	return error;
1226}
1227
1228STATIC int
1229xfs_fs_remount(
1230	struct super_block	*sb,
1231	int			*flags,
1232	char			*options)
1233{
1234	struct xfs_mount	*mp = XFS_M(sb);
1235	xfs_sb_t		*sbp = &mp->m_sb;
1236	substring_t		args[MAX_OPT_ARGS];
1237	char			*p;
1238	int			error;
1239
1240	/* First, check for complete junk; i.e. invalid options */
1241	error = xfs_test_remount_options(sb, options);
1242	if (error)
1243		return error;
1244
1245	sync_filesystem(sb);
1246	while ((p = strsep(&options, ",")) != NULL) {
1247		int token;
1248
1249		if (!*p)
1250			continue;
1251
1252		token = match_token(p, tokens, args);
1253		switch (token) {
1254		case Opt_inode64:
1255			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1256			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1257			break;
1258		case Opt_inode32:
1259			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1260			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1261			break;
1262		default:
1263			/*
1264			 * Logically we would return an error here to prevent
1265			 * users from believing they might have changed
1266			 * mount options using remount which can't be changed.
1267			 *
1268			 * But unfortunately mount(8) adds all options from
1269			 * mtab and fstab to the mount arguments in some cases
1270			 * so we can't blindly reject options, but have to
1271			 * check for each specified option if it actually
1272			 * differs from the currently set option and only
1273			 * reject it if that's the case.
1274			 *
1275			 * Until that is implemented we return success for
1276			 * every remount request, and silently ignore all
1277			 * options that we can't actually change.
1278			 */
1279#if 0
1280			xfs_info(mp,
1281		"mount option \"%s\" not supported for remount", p);
1282			return -EINVAL;
1283#else
1284			break;
1285#endif
1286		}
1287	}
1288
1289	/* ro -> rw */
1290	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
1291		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1292			xfs_warn(mp,
1293		"ro->rw transition prohibited on norecovery mount");
1294			return -EINVAL;
1295		}
1296
1297		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1298		    xfs_sb_has_ro_compat_feature(sbp,
1299					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1300			xfs_warn(mp,
1301"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1302				(sbp->sb_features_ro_compat &
1303					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1304			return -EINVAL;
1305		}
1306
1307		mp->m_flags &= ~XFS_MOUNT_RDONLY;
1308
1309		/*
1310		 * If this is the first remount to writeable state we
1311		 * might have some superblock changes to update.
1312		 */
1313		if (mp->m_update_sb) {
1314			error = xfs_sync_sb(mp, false);
1315			if (error) {
1316				xfs_warn(mp, "failed to write sb changes");
1317				return error;
1318			}
1319			mp->m_update_sb = false;
1320		}
1321
1322		/*
1323		 * Fill out the reserve pool if it is empty. Use the stashed
1324		 * value if it is non-zero, otherwise go with the default.
1325		 */
1326		xfs_restore_resvblks(mp);
1327		xfs_log_work_queue(mp);
1328
1329		/* Recover any CoW blocks that never got remapped. */
1330		error = xfs_reflink_recover_cow(mp);
1331		if (error) {
1332			xfs_err(mp,
1333	"Error %d recovering leftover CoW allocations.", error);
1334			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1335			return error;
1336		}
1337		xfs_start_block_reaping(mp);
1338
1339		/* Create the per-AG metadata reservation pool. */
1340		error = xfs_fs_reserve_ag_blocks(mp);
1341		if (error && error != -ENOSPC)
1342			return error;
1343	}
1344
1345	/* rw -> ro */
1346	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
1347		/*
1348		 * Cancel background eofb scanning so it cannot race with the
1349		 * final log force+buftarg wait and deadlock the remount.
1350		 */
1351		xfs_stop_block_reaping(mp);
1352
1353		/* Get rid of any leftover CoW reservations... */
1354		error = xfs_icache_free_cowblocks(mp, NULL);
1355		if (error) {
1356			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1357			return error;
1358		}
1359
1360		/* Free the per-AG metadata reservation pool. */
1361		error = xfs_fs_unreserve_ag_blocks(mp);
1362		if (error) {
1363			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1364			return error;
1365		}
1366
1367		/*
1368		 * Before we sync the metadata, we need to free up the reserve
1369		 * block pool so that the used block count in the superblock on
1370		 * disk is correct at the end of the remount. Stash the current
1371		 * reserve pool size so that if we get remounted rw, we can
1372		 * return it to the same size.
1373		 */
1374		xfs_save_resvblks(mp);
1375
1376		xfs_quiesce_attr(mp);
1377		mp->m_flags |= XFS_MOUNT_RDONLY;
1378	}
1379
1380	return 0;
1381}
1382
1383/*
1384 * Second stage of a freeze. The data is already frozen so we only
1385 * need to take care of the metadata. Once that's done sync the superblock
1386 * to the log to dirty it in case of a crash while frozen. This ensures that we
1387 * will recover the unlinked inode lists on the next mount.
1388 */
1389STATIC int
1390xfs_fs_freeze(
1391	struct super_block	*sb)
1392{
1393	struct xfs_mount	*mp = XFS_M(sb);
1394
1395	xfs_stop_block_reaping(mp);
1396	xfs_save_resvblks(mp);
1397	xfs_quiesce_attr(mp);
1398	return xfs_sync_sb(mp, true);
1399}
1400
1401STATIC int
1402xfs_fs_unfreeze(
1403	struct super_block	*sb)
1404{
1405	struct xfs_mount	*mp = XFS_M(sb);
1406
1407	xfs_restore_resvblks(mp);
1408	xfs_log_work_queue(mp);
1409	xfs_start_block_reaping(mp);
1410	return 0;
1411}
1412
1413STATIC int
1414xfs_fs_show_options(
1415	struct seq_file		*m,
1416	struct dentry		*root)
1417{
1418	xfs_showargs(XFS_M(root->d_sb), m);
1419	return 0;
1420}
1421
1422/*
1423 * This function fills in xfs_mount_t fields based on mount args.
1424 * Note: the superblock _has_ now been read in.
1425 */
1426STATIC int
1427xfs_finish_flags(
1428	struct xfs_mount	*mp)
1429{
1430	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1431
1432	/* Fail a mount where the logbuf is smaller than the log stripe */
1433	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1434		if (mp->m_logbsize <= 0 &&
1435		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1436			mp->m_logbsize = mp->m_sb.sb_logsunit;
1437		} else if (mp->m_logbsize > 0 &&
1438			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1439			xfs_warn(mp,
1440		"logbuf size must be greater than or equal to log stripe size");
1441			return -EINVAL;
1442		}
1443	} else {
1444		/* Fail a mount if the logbuf is larger than 32K */
1445		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1446			xfs_warn(mp,
1447		"logbuf size for version 1 logs must be 16K or 32K");
1448			return -EINVAL;
1449		}
1450	}
1451
1452	/*
1453	 * V5 filesystems always use attr2 format for attributes.
1454	 */
1455	if (xfs_sb_version_hascrc(&mp->m_sb) &&
1456	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1457		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1458			     "attr2 is always enabled for V5 filesystems.");
1459		return -EINVAL;
1460	}
1461
1462	/*
1463	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1464	 * told by noattr2 to turn it off
1465	 */
1466	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1467	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
1468		mp->m_flags |= XFS_MOUNT_ATTR2;
1469
1470	/*
1471	 * prohibit r/w mounts of read-only filesystems
1472	 */
1473	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1474		xfs_warn(mp,
1475			"cannot mount a read-only filesystem as read-write");
1476		return -EROFS;
1477	}
1478
1479	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
1480	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
1481	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
1482		xfs_warn(mp,
1483		  "Super block does not support project and group quota together");
1484		return -EINVAL;
1485	}
1486
1487	return 0;
1488}
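/*
 * Example (illustrative): on a v2 log with a 256k log stripe unit and no
 * logbsize= option, m_logbsize is raised to 256k by the code above, while
 * an explicit logbsize=64k would be rejected because it is smaller than
 * the log stripe size.
 */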
1489
1490static int
1491xfs_init_percpu_counters(
1492	struct xfs_mount	*mp)
1493{
1494	int		error;
1495
1496	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1497	if (error)
1498		return -ENOMEM;
1499
1500	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1501	if (error)
1502		goto free_icount;
1503
1504	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1505	if (error)
1506		goto free_ifree;
1507
1508	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1509	if (error)
1510		goto free_fdblocks;
1511
1512	return 0;
1513
1514free_fdblocks:
1515	percpu_counter_destroy(&mp->m_fdblocks);
1516free_ifree:
1517	percpu_counter_destroy(&mp->m_ifree);
1518free_icount:
1519	percpu_counter_destroy(&mp->m_icount);
1520	return -ENOMEM;
1521}
1522
1523void
1524xfs_reinit_percpu_counters(
1525	struct xfs_mount	*mp)
1526{
1527	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1528	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1529	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1530}
1531
1532static void
1533xfs_destroy_percpu_counters(
1534	struct xfs_mount	*mp)
1535{
1536	percpu_counter_destroy(&mp->m_icount);
1537	percpu_counter_destroy(&mp->m_ifree);
1538	percpu_counter_destroy(&mp->m_fdblocks);
1539	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1540	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1541	percpu_counter_destroy(&mp->m_delalloc_blks);
1542}
1543
1544static struct xfs_mount *
1545xfs_mount_alloc(
1546	struct super_block	*sb)
1547{
1548	struct xfs_mount	*mp;
1549
1550	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1551	if (!mp)
1552		return NULL;
1553
1554	mp->m_super = sb;
1555	spin_lock_init(&mp->m_sb_lock);
1556	spin_lock_init(&mp->m_agirotor_lock);
1557	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1558	spin_lock_init(&mp->m_perag_lock);
1559	mutex_init(&mp->m_growlock);
1560	atomic_set(&mp->m_active_trans, 0);
1561	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1562	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1563	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1564	mp->m_kobj.kobject.kset = xfs_kset;
1565	/*
1566	 * We don't create the finobt per-ag space reservation until after log
1567	 * recovery, so we must set this to true so that an ifree transaction
1568	 * started during log recovery will not depend on space reservations
1569	 * for finobt expansion.
1570	 */
1571	mp->m_finobt_nores = true;
1572	return mp;
1573}
1574
1575
1576STATIC int
1577xfs_fs_fill_super(
1578	struct super_block	*sb,
1579	void			*data,
1580	int			silent)
1581{
1582	struct inode		*root;
1583	struct xfs_mount	*mp = NULL;
1584	int			flags = 0, error = -ENOMEM;
1585
1586	/*
1587	 * allocate mp and do all low-level struct initializations before we
1588	 * attach it to the super
1589	 */
1590	mp = xfs_mount_alloc(sb);
1591	if (!mp)
1592		goto out;
1593	sb->s_fs_info = mp;
1594
1595	error = xfs_parseargs(mp, (char *)data);
1596	if (error)
1597		goto out_free_fsname;
1598
1599	sb_min_blocksize(sb, BBSIZE);
1600	sb->s_xattr = xfs_xattr_handlers;
1601	sb->s_export_op = &xfs_export_operations;
1602#ifdef CONFIG_XFS_QUOTA
1603	sb->s_qcop = &xfs_quotactl_operations;
1604	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1605#endif
1606	sb->s_op = &xfs_super_operations;
1607
1608	/*
1609	 * Delay mount work if the debug hook is set. This is debug
1610	 * instrumentation to coordinate simulation of xfs mount failures with
1611	 * VFS superblock operations
1612	 */
1613	if (xfs_globals.mount_delay) {
1614		xfs_notice(mp, "Delaying mount for %d seconds.",
1615			xfs_globals.mount_delay);
1616		msleep(xfs_globals.mount_delay * 1000);
1617	}
1618
1619	if (silent)
1620		flags |= XFS_MFSI_QUIET;
1621
1622	error = xfs_open_devices(mp);
1623	if (error)
1624		goto out_free_fsname;
1625
1626	error = xfs_init_mount_workqueues(mp);
1627	if (error)
1628		goto out_close_devices;
1629
1630	error = xfs_init_percpu_counters(mp);
1631	if (error)
1632		goto out_destroy_workqueues;
1633
1634	/* Allocate stats memory before we do operations that might use it */
1635	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1636	if (!mp->m_stats.xs_stats) {
1637		error = -ENOMEM;
1638		goto out_destroy_counters;
1639	}
1640
1641	error = xfs_readsb(mp, flags);
1642	if (error)
1643		goto out_free_stats;
1644
1645	error = xfs_finish_flags(mp);
1646	if (error)
1647		goto out_free_sb;
1648
1649	error = xfs_setup_devices(mp);
1650	if (error)
1651		goto out_free_sb;
1652
1653	error = xfs_filestream_mount(mp);
1654	if (error)
1655		goto out_free_sb;
1656
1657	/*
1658	 * we must configure the block size in the superblock before we run the
1659	 * full mount process as the mount process can lookup and cache inodes.
1660	 */
1661	sb->s_magic = XFS_SUPER_MAGIC;
1662	sb->s_blocksize = mp->m_sb.sb_blocksize;
1663	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1664	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1665	sb->s_max_links = XFS_MAXLINK;
1666	sb->s_time_gran = 1;
1667	sb->s_time_min = S32_MIN;
1668	sb->s_time_max = S32_MAX;
1669	sb->s_iflags |= SB_I_CGROUPWB;
1670
1671	set_posix_acl_flag(sb);
1672
1673	/* version 5 superblocks support inode version counters. */
1674	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1675		sb->s_flags |= SB_I_VERSION;
1676
1677	if (mp->m_flags & XFS_MOUNT_DAX) {
1678		bool rtdev_is_dax = false, datadev_is_dax;
1679
1680		xfs_warn(mp,
1681		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1682
1683		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1684			sb->s_blocksize);
1685		if (mp->m_rtdev_targp)
1686			rtdev_is_dax = bdev_dax_supported(
1687				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1688		if (!rtdev_is_dax && !datadev_is_dax) {
1689			xfs_alert(mp,
1690			"DAX unsupported by block device. Turning off DAX.");
1691			mp->m_flags &= ~XFS_MOUNT_DAX;
1692		}
1693		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1694			xfs_alert(mp,
1695		"DAX and reflink cannot be used together!");
1696			error = -EINVAL;
1697			goto out_filestream_unmount;
1698		}
1699	}
1700
1701	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1702		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1703
1704		if (!blk_queue_discard(q)) {
1705			xfs_warn(mp, "mounting with \"discard\" option, but "
1706					"the device does not support discard");
1707			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1708		}
1709	}
1710
1711	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1712		if (mp->m_sb.sb_rblocks) {
1713			xfs_alert(mp,
1714	"reflink not compatible with realtime device!");
1715			error = -EINVAL;
1716			goto out_filestream_unmount;
1717		}
1718
1719		if (xfs_globals.always_cow) {
1720			xfs_info(mp, "using DEBUG-only always_cow mode.");
1721			mp->m_always_cow = true;
1722		}
1723	}
1724
1725	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1726		xfs_alert(mp,
1727	"reverse mapping btree not compatible with realtime device!");
1728		error = -EINVAL;
1729		goto out_filestream_unmount;
1730	}
1731
1732	error = xfs_mountfs(mp);
1733	if (error)
1734		goto out_filestream_unmount;
1735
1736	root = igrab(VFS_I(mp->m_rootip));
1737	if (!root) {
1738		error = -ENOENT;
1739		goto out_unmount;
1740	}
1741	sb->s_root = d_make_root(root);
1742	if (!sb->s_root) {
1743		error = -ENOMEM;
1744		goto out_unmount;
1745	}
1746
1747	return 0;
1748
1749 out_filestream_unmount:
1750	xfs_filestream_unmount(mp);
1751 out_free_sb:
1752	xfs_freesb(mp);
1753 out_free_stats:
1754	free_percpu(mp->m_stats.xs_stats);
1755 out_destroy_counters:
1756	xfs_destroy_percpu_counters(mp);
1757 out_destroy_workqueues:
1758	xfs_destroy_mount_workqueues(mp);
1759 out_close_devices:
1760	xfs_close_devices(mp);
1761 out_free_fsname:
1762	sb->s_fs_info = NULL;
1763	xfs_free_fsname(mp);
1764	kfree(mp);
1765 out:
1766	return error;
1767
1768 out_unmount:
1769	xfs_filestream_unmount(mp);
1770	xfs_unmountfs(mp);
1771	goto out_free_sb;
1772}
1773
1774STATIC void
1775xfs_fs_put_super(
1776	struct super_block	*sb)
1777{
1778	struct xfs_mount	*mp = XFS_M(sb);
1779
1780	/* if ->fill_super failed, we have no mount to tear down */
1781	if (!sb->s_fs_info)
1782		return;
1783
1784	xfs_notice(mp, "Unmounting Filesystem");
1785	xfs_filestream_unmount(mp);
1786	xfs_unmountfs(mp);
1787
1788	xfs_freesb(mp);
1789	free_percpu(mp->m_stats.xs_stats);
1790	xfs_destroy_percpu_counters(mp);
1791	xfs_destroy_mount_workqueues(mp);
1792	xfs_close_devices(mp);
1793
1794	sb->s_fs_info = NULL;
1795	xfs_free_fsname(mp);
1796	kfree(mp);
1797}
1798
1799STATIC struct dentry *
1800xfs_fs_mount(
1801	struct file_system_type	*fs_type,
1802	int			flags,
1803	const char		*dev_name,
1804	void			*data)
1805{
1806	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1807}
1808
1809static long
1810xfs_fs_nr_cached_objects(
1811	struct super_block	*sb,
1812	struct shrink_control	*sc)
1813{
1814	/* Paranoia: catch incorrect calls during mount setup or teardown */
1815	if (WARN_ON_ONCE(!sb->s_fs_info))
1816		return 0;
1817	return xfs_reclaim_inodes_count(XFS_M(sb));
1818}
1819
1820static long
1821xfs_fs_free_cached_objects(
1822	struct super_block	*sb,
1823	struct shrink_control	*sc)
1824{
1825	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1826}
1827
1828static const struct super_operations xfs_super_operations = {
1829	.alloc_inode		= xfs_fs_alloc_inode,
1830	.destroy_inode		= xfs_fs_destroy_inode,
1831	.dirty_inode		= xfs_fs_dirty_inode,
1832	.drop_inode		= xfs_fs_drop_inode,
1833	.put_super		= xfs_fs_put_super,
1834	.sync_fs		= xfs_fs_sync_fs,
1835	.freeze_fs		= xfs_fs_freeze,
1836	.unfreeze_fs		= xfs_fs_unfreeze,
1837	.statfs			= xfs_fs_statfs,
1838	.remount_fs		= xfs_fs_remount,
1839	.show_options		= xfs_fs_show_options,
1840	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1841	.free_cached_objects	= xfs_fs_free_cached_objects,
1842};
1843
1844static struct file_system_type xfs_fs_type = {
1845	.owner			= THIS_MODULE,
1846	.name			= "xfs",
1847	.mount			= xfs_fs_mount,
1848	.kill_sb		= kill_block_super,
1849	.fs_flags		= FS_REQUIRES_DEV,
 
1850};
1851MODULE_ALIAS_FS("xfs");
1852
1853STATIC int __init
1854xfs_init_zones(void)
1855{
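	/*
	 * Create the slab caches ("zones") for the most frequently allocated
	 * XFS objects.  Any failure below unwinds the caches created so far
	 * and returns -ENOMEM.
	 */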
1856	if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1857			offsetof(struct xfs_ioend, io_inline_bio),
1858			BIOSET_NEED_BVECS))
1859		goto out;
1860
1861	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1862						"xfs_log_ticket");
1863	if (!xfs_log_ticket_zone)
1864		goto out_free_ioend_bioset;
1865
1866	xfs_bmap_free_item_zone = kmem_zone_init(
1867			sizeof(struct xfs_extent_free_item),
1868			"xfs_bmap_free_item");
1869	if (!xfs_bmap_free_item_zone)
1870		goto out_destroy_log_ticket_zone;
1871
1872	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1873						"xfs_btree_cur");
1874	if (!xfs_btree_cur_zone)
1875		goto out_destroy_bmap_free_item_zone;
1876
1877	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1878						"xfs_da_state");
1879	if (!xfs_da_state_zone)
1880		goto out_destroy_btree_cur_zone;
1881
1882	xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
1883	if (!xfs_ifork_zone)
1884		goto out_destroy_da_state_zone;
1885
1886	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1887	if (!xfs_trans_zone)
1888		goto out_destroy_ifork_zone;
1889
1890
1891	/*
1892	 * The size of the zone allocated buf log item is the maximum
1893	 * size possible under XFS.  This wastes a little bit of memory,
1894	 * but it is much faster.
1895	 */
1896	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
1897					   "xfs_buf_item");
1898	if (!xfs_buf_item_zone)
1899		goto out_destroy_trans_zone;
1900
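	/*
	 * The EFD/EFI zones reserve room for *_MAX_FAST_EXTENTS extents; the
	 * "- 1" reflects the one extent already declared inside the log item
	 * structure itself.
	 */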
1901	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1902			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1903				 sizeof(xfs_extent_t))), "xfs_efd_item");
1904	if (!xfs_efd_zone)
1905		goto out_destroy_buf_item_zone;
1906
1907	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1908			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1909				sizeof(xfs_extent_t))), "xfs_efi_item");
1910	if (!xfs_efi_zone)
1911		goto out_destroy_efd_zone;
1912
1913	xfs_inode_zone =
1914		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1915			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
1916			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
1917	if (!xfs_inode_zone)
1918		goto out_destroy_efi_zone;
1919
1920	xfs_ili_zone =
1921		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1922					KM_ZONE_SPREAD, NULL);
1923	if (!xfs_ili_zone)
1924		goto out_destroy_inode_zone;
1925	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
1926					"xfs_icr");
1927	if (!xfs_icreate_zone)
1928		goto out_destroy_ili_zone;
1929
1930	xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
1931			"xfs_rud_item");
1932	if (!xfs_rud_zone)
1933		goto out_destroy_icreate_zone;
1934
1935	xfs_rui_zone = kmem_zone_init(
1936			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1937			"xfs_rui_item");
1938	if (!xfs_rui_zone)
1939		goto out_destroy_rud_zone;
1940
1941	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
1942			"xfs_cud_item");
1943	if (!xfs_cud_zone)
1944		goto out_destroy_rui_zone;
 
1945
1946	xfs_cui_zone = kmem_zone_init(
1947			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1948			"xfs_cui_item");
1949	if (!xfs_cui_zone)
1950		goto out_destroy_cud_zone;
1951
1952	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
1953			"xfs_bud_item");
1954	if (!xfs_bud_zone)
1955		goto out_destroy_cui_zone;
 
1956
1957	xfs_bui_zone = kmem_zone_init(
1958			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1959			"xfs_bui_item");
1960	if (!xfs_bui_zone)
1961		goto out_destroy_bud_zone;
1962
1963	return 0;
1964
1965 out_destroy_bud_zone:
1966	kmem_zone_destroy(xfs_bud_zone);
1967 out_destroy_cui_zone:
1968	kmem_zone_destroy(xfs_cui_zone);
1969 out_destroy_cud_zone:
1970	kmem_zone_destroy(xfs_cud_zone);
1971 out_destroy_rui_zone:
1972	kmem_zone_destroy(xfs_rui_zone);
1973 out_destroy_rud_zone:
1974	kmem_zone_destroy(xfs_rud_zone);
1975 out_destroy_icreate_zone:
1976	kmem_zone_destroy(xfs_icreate_zone);
1977 out_destroy_ili_zone:
1978	kmem_zone_destroy(xfs_ili_zone);
1979 out_destroy_inode_zone:
1980	kmem_zone_destroy(xfs_inode_zone);
1981 out_destroy_efi_zone:
1982	kmem_zone_destroy(xfs_efi_zone);
1983 out_destroy_efd_zone:
1984	kmem_zone_destroy(xfs_efd_zone);
1985 out_destroy_buf_item_zone:
1986	kmem_zone_destroy(xfs_buf_item_zone);
1987 out_destroy_trans_zone:
1988	kmem_zone_destroy(xfs_trans_zone);
1989 out_destroy_ifork_zone:
1990	kmem_zone_destroy(xfs_ifork_zone);
1991 out_destroy_da_state_zone:
1992	kmem_zone_destroy(xfs_da_state_zone);
1993 out_destroy_btree_cur_zone:
1994	kmem_zone_destroy(xfs_btree_cur_zone);
1995 out_destroy_bmap_free_item_zone:
1996	kmem_zone_destroy(xfs_bmap_free_item_zone);
1997 out_destroy_log_ticket_zone:
1998	kmem_zone_destroy(xfs_log_ticket_zone);
1999 out_free_ioend_bioset:
2000	bioset_exit(&xfs_ioend_bioset);
2001 out:
2002	return -ENOMEM;
2003}
2004
2005STATIC void
2006xfs_destroy_zones(void)
2007{
2008	/*
2009	 * Make sure all delayed rcu free are flushed before we
2010	 * destroy caches.
2011	 */
2012	rcu_barrier();
2013	kmem_zone_destroy(xfs_bui_zone);
2014	kmem_zone_destroy(xfs_bud_zone);
2015	kmem_zone_destroy(xfs_cui_zone);
2016	kmem_zone_destroy(xfs_cud_zone);
2017	kmem_zone_destroy(xfs_rui_zone);
2018	kmem_zone_destroy(xfs_rud_zone);
2019	kmem_zone_destroy(xfs_icreate_zone);
2020	kmem_zone_destroy(xfs_ili_zone);
2021	kmem_zone_destroy(xfs_inode_zone);
2022	kmem_zone_destroy(xfs_efi_zone);
2023	kmem_zone_destroy(xfs_efd_zone);
2024	kmem_zone_destroy(xfs_buf_item_zone);
2025	kmem_zone_destroy(xfs_trans_zone);
2026	kmem_zone_destroy(xfs_ifork_zone);
2027	kmem_zone_destroy(xfs_da_state_zone);
2028	kmem_zone_destroy(xfs_btree_cur_zone);
2029	kmem_zone_destroy(xfs_bmap_free_item_zone);
2030	kmem_zone_destroy(xfs_log_ticket_zone);
2031	bioset_exit(&xfs_ioend_bioset);
2032}
2033
2034STATIC int __init
2035xfs_init_workqueues(void)
2036{
2037	/*
2038	 * The allocation workqueue can be used in memory reclaim situations
2039	 * (writepage path), and parallelism is only limited by the number of
2040	 * AGs in all the filesystems mounted. Hence use the default large
2041	 * max_active value for this workqueue.
2042	 */
2043	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2044			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2045	if (!xfs_alloc_wq)
2046		return -ENOMEM;
2047
2048	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
 
2049	if (!xfs_discard_wq)
2050		goto out_free_alloc_wq;
2051
2052	return 0;
2053out_free_alloc_wq:
2054	destroy_workqueue(xfs_alloc_wq);
2055	return -ENOMEM;
2056}
2057
2058STATIC void
2059xfs_destroy_workqueues(void)
2060{
2061	destroy_workqueue(xfs_discard_wq);
2062	destroy_workqueue(xfs_alloc_wq);
2063}
2064
2065STATIC int __init
2066init_xfs_fs(void)
2067{
2068	int			error;
2069
2070	xfs_check_ondisk_structs();
2071
2072	printk(KERN_INFO XFS_VERSION_STRING " with "
2073			 XFS_BUILD_OPTIONS " enabled\n");
2074
2075	xfs_dir_startup();
2076
2077	error = xfs_init_zones();
2078	if (error)
2079		goto out;
2080
2081	error = xfs_init_workqueues();
2082	if (error)
2083		goto out_destroy_zones;
2084
2085	error = xfs_mru_cache_init();
2086	if (error)
2087		goto out_destroy_wq;
2088
2089	error = xfs_buf_init();
2090	if (error)
2091		goto out_mru_cache_uninit;
2092
2093	error = xfs_init_procfs();
2094	if (error)
2095		goto out_buf_terminate;
2096
2097	error = xfs_sysctl_register();
2098	if (error)
2099		goto out_cleanup_procfs;
2100
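	/* Create the top-level xfs sysfs directory, /sys/fs/xfs. */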
2101	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2102	if (!xfs_kset) {
2103		error = -ENOMEM;
2104		goto out_sysctl_unregister;
2105	}
2106
2107	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2108
2109	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2110	if (!xfsstats.xs_stats) {
2111		error = -ENOMEM;
2112		goto out_kset_unregister;
2113	}
2114
2115	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2116			       "stats");
2117	if (error)
2118		goto out_free_stats;
2119
2120#ifdef DEBUG
2121	xfs_dbg_kobj.kobject.kset = xfs_kset;
2122	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2123	if (error)
2124		goto out_remove_stats_kobj;
2125#endif
2126
2127	error = xfs_qm_init();
2128	if (error)
2129		goto out_remove_dbg_kobj;
2130
2131	error = register_filesystem(&xfs_fs_type);
2132	if (error)
2133		goto out_qm_exit;
2134	return 0;
2135
2136 out_qm_exit:
2137	xfs_qm_exit();
2138 out_remove_dbg_kobj:
2139#ifdef DEBUG
2140	xfs_sysfs_del(&xfs_dbg_kobj);
2141 out_remove_stats_kobj:
2142#endif
2143	xfs_sysfs_del(&xfsstats.xs_kobj);
2144 out_free_stats:
2145	free_percpu(xfsstats.xs_stats);
2146 out_kset_unregister:
2147	kset_unregister(xfs_kset);
2148 out_sysctl_unregister:
 
2149	xfs_sysctl_unregister();
2150 out_cleanup_procfs:
2151	xfs_cleanup_procfs();
2152 out_buf_terminate:
2153	xfs_buf_terminate();
2154 out_mru_cache_uninit:
2155	xfs_mru_cache_uninit();
2156 out_destroy_wq:
2157	xfs_destroy_workqueues();
2158 out_destroy_zones:
2159	xfs_destroy_zones();
2160 out:
2161	return error;
2162}
2163
2164STATIC void __exit
2165exit_xfs_fs(void)
2166{
2167	xfs_qm_exit();
2168	unregister_filesystem(&xfs_fs_type);
2169#ifdef DEBUG
2170	xfs_sysfs_del(&xfs_dbg_kobj);
2171#endif
 
2172	xfs_sysfs_del(&xfsstats.xs_kobj);
2173	free_percpu(xfsstats.xs_stats);
2174	kset_unregister(xfs_kset);
 
2175	xfs_sysctl_unregister();
2176	xfs_cleanup_procfs();
2177	xfs_buf_terminate();
2178	xfs_mru_cache_uninit();
2179	xfs_destroy_workqueues();
2180	xfs_destroy_zones();
2181	xfs_uuid_table_free();
2182}
2183
2184module_init(init_xfs_fs);
2185module_exit(exit_xfs_fs);
2186
2187MODULE_AUTHOR("Silicon Graphics, Inc.");
2188MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2189MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_sb.h"
  13#include "xfs_mount.h"
  14#include "xfs_inode.h"
  15#include "xfs_btree.h"
  16#include "xfs_bmap.h"
  17#include "xfs_alloc.h"
  18#include "xfs_fsops.h"
  19#include "xfs_trans.h"
  20#include "xfs_buf_item.h"
  21#include "xfs_log.h"
  22#include "xfs_log_priv.h"
  23#include "xfs_dir2.h"
  24#include "xfs_extfree_item.h"
  25#include "xfs_mru_cache.h"
  26#include "xfs_inode_item.h"
  27#include "xfs_icache.h"
  28#include "xfs_trace.h"
  29#include "xfs_icreate_item.h"
  30#include "xfs_filestream.h"
  31#include "xfs_quota.h"
  32#include "xfs_sysfs.h"
  33#include "xfs_ondisk.h"
  34#include "xfs_rmap_item.h"
  35#include "xfs_refcount_item.h"
  36#include "xfs_bmap_item.h"
  37#include "xfs_reflink.h"
  38#include "xfs_pwork.h"
  39#include "xfs_ag.h"
  40#include "xfs_defer.h"
  41#include "xfs_attr_item.h"
  42#include "xfs_xattr.h"
  43#include "xfs_iunlink_item.h"
  44#include "xfs_dahash_test.h"
  45#include "xfs_rtbitmap.h"
  46#include "xfs_exchmaps_item.h"
  47#include "xfs_parent.h"
  48#include "xfs_rtalloc.h"
  49#include "scrub/stats.h"
  50#include "scrub/rcbag_btree.h"
  51
  52#include <linux/magic.h>
  53#include <linux/fs_context.h>
  54#include <linux/fs_parser.h>
  55
  56static const struct super_operations xfs_super_operations;
 
  57
  58static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
  59static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
  60#ifdef DEBUG
  61static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
  62#endif
  63
  64enum xfs_dax_mode {
  65	XFS_DAX_INODE = 0,
  66	XFS_DAX_ALWAYS = 1,
  67	XFS_DAX_NEVER = 2,
  68};
  69
  70/* Were quota mount options provided?  Must use the upper 16 bits of qflags. */
  71#define XFS_QFLAGS_MNTOPTS	(1U << 31)
  72
  73static void
  74xfs_mount_set_dax_mode(
  75	struct xfs_mount	*mp,
  76	enum xfs_dax_mode	mode)
  77{
  78	switch (mode) {
  79	case XFS_DAX_INODE:
  80		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
  81		break;
  82	case XFS_DAX_ALWAYS:
  83		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
  84		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
  85		break;
  86	case XFS_DAX_NEVER:
  87		mp->m_features |= XFS_FEAT_DAX_NEVER;
  88		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
  89		break;
  90	}
  91}
  92
  93static const struct constant_table dax_param_enums[] = {
  94	{"inode",	XFS_DAX_INODE },
  95	{"always",	XFS_DAX_ALWAYS },
  96	{"never",	XFS_DAX_NEVER },
  97	{}
  98};
  99
 100/*
 101 * Table driven mount option parser.
 102 */
 103enum {
 104	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
 105	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
 106	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
 107	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
 108	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
 109	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
 110	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
 111	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
 112	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
 113};
 114
 115static const struct fs_parameter_spec xfs_fs_parameters[] = {
 116	fsparam_u32("logbufs",		Opt_logbufs),
 117	fsparam_string("logbsize",	Opt_logbsize),
 118	fsparam_string("logdev",	Opt_logdev),
 119	fsparam_string("rtdev",		Opt_rtdev),
 120	fsparam_flag("wsync",		Opt_wsync),
 121	fsparam_flag("noalign",		Opt_noalign),
 122	fsparam_flag("swalloc",		Opt_swalloc),
 123	fsparam_u32("sunit",		Opt_sunit),
 124	fsparam_u32("swidth",		Opt_swidth),
 125	fsparam_flag("nouuid",		Opt_nouuid),
 126	fsparam_flag("grpid",		Opt_grpid),
 127	fsparam_flag("nogrpid",		Opt_nogrpid),
 128	fsparam_flag("bsdgroups",	Opt_bsdgroups),
 129	fsparam_flag("sysvgroups",	Opt_sysvgroups),
 130	fsparam_string("allocsize",	Opt_allocsize),
 131	fsparam_flag("norecovery",	Opt_norecovery),
 132	fsparam_flag("inode64",		Opt_inode64),
 133	fsparam_flag("inode32",		Opt_inode32),
 134	fsparam_flag("ikeep",		Opt_ikeep),
 135	fsparam_flag("noikeep",		Opt_noikeep),
 136	fsparam_flag("largeio",		Opt_largeio),
 137	fsparam_flag("nolargeio",	Opt_nolargeio),
 138	fsparam_flag("attr2",		Opt_attr2),
 139	fsparam_flag("noattr2",		Opt_noattr2),
 140	fsparam_flag("filestreams",	Opt_filestreams),
 141	fsparam_flag("quota",		Opt_quota),
 142	fsparam_flag("noquota",		Opt_noquota),
 143	fsparam_flag("usrquota",	Opt_usrquota),
 144	fsparam_flag("grpquota",	Opt_grpquota),
 145	fsparam_flag("prjquota",	Opt_prjquota),
 146	fsparam_flag("uquota",		Opt_uquota),
 147	fsparam_flag("gquota",		Opt_gquota),
 148	fsparam_flag("pquota",		Opt_pquota),
 149	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
 150	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
 151	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
 152	fsparam_flag("qnoenforce",	Opt_qnoenforce),
 153	fsparam_flag("discard",		Opt_discard),
 154	fsparam_flag("nodiscard",	Opt_nodiscard),
 155	fsparam_flag("dax",		Opt_dax),
 156	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
 157	{}
 158};
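
/*
 * Example (illustrative only): with the table above, a mount such as
 *	mount -t xfs -o logbsize=64k,usrquota,dax=never /dev/sdX /mnt
 * is parsed option-by-option through xfs_fs_parse_param() below.
 */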
 159
 160struct proc_xfs_info {
 161	uint64_t	flag;
 162	char		*str;
 163};
 164
 165static int
 166xfs_fs_show_options(
 167	struct seq_file		*m,
 168	struct dentry		*root)
 169{
 170	static struct proc_xfs_info xfs_info_set[] = {
 171		/* the few simple ones we can get from the mount struct */
 172		{ XFS_FEAT_IKEEP,		",ikeep" },
 173		{ XFS_FEAT_WSYNC,		",wsync" },
 174		{ XFS_FEAT_NOALIGN,		",noalign" },
 175		{ XFS_FEAT_SWALLOC,		",swalloc" },
 176		{ XFS_FEAT_NOUUID,		",nouuid" },
 177		{ XFS_FEAT_NORECOVERY,		",norecovery" },
 178		{ XFS_FEAT_ATTR2,		",attr2" },
 179		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
 180		{ XFS_FEAT_GRPID,		",grpid" },
 181		{ XFS_FEAT_DISCARD,		",discard" },
 182		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
 183		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
 184		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
 185		{ 0, NULL }
 186	};
 187	struct xfs_mount	*mp = XFS_M(root->d_sb);
 188	struct proc_xfs_info	*xfs_infop;
 189
 190	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 191		if (mp->m_features & xfs_infop->flag)
 192			seq_puts(m, xfs_infop->str);
 193	}
 194
 195	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
 196
 197	if (xfs_has_allocsize(mp))
 198		seq_printf(m, ",allocsize=%dk",
 199			   (1 << mp->m_allocsize_log) >> 10);
 200
 201	if (mp->m_logbufs > 0)
 202		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
 203	if (mp->m_logbsize > 0)
 204		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
 205
 206	if (mp->m_logname)
 207		seq_show_option(m, "logdev", mp->m_logname);
 208	if (mp->m_rtname)
 209		seq_show_option(m, "rtdev", mp->m_rtname);
 210
 211	if (mp->m_dalign > 0)
 212		seq_printf(m, ",sunit=%d",
 213				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 214	if (mp->m_swidth > 0)
 215		seq_printf(m, ",swidth=%d",
 216				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 217
 218	if (mp->m_qflags & XFS_UQUOTA_ENFD)
 219		seq_puts(m, ",usrquota");
 220	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
 221		seq_puts(m, ",uqnoenforce");
 222
 223	if (mp->m_qflags & XFS_PQUOTA_ENFD)
 224		seq_puts(m, ",prjquota");
 225	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
 226		seq_puts(m, ",pqnoenforce");
 227
 228	if (mp->m_qflags & XFS_GQUOTA_ENFD)
 229		seq_puts(m, ",grpquota");
 230	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
 231		seq_puts(m, ",gqnoenforce");
 232
 233	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 234		seq_puts(m, ",noquota");
 
 235
 236	return 0;
 237}
 238
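/*
 * Apply the inode32/inode64 decision to one AG: record whether it may hold
 * inodes and whether it should be preferred for metadata.  Returns true only
 * when inode32 is in effect and this AG can still produce 32-bit inode
 * numbers.
 */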
 239static bool
 240xfs_set_inode_alloc_perag(
 241	struct xfs_perag	*pag,
 242	xfs_ino_t		ino,
 243	xfs_agnumber_t		max_metadata)
 244{
 245	if (!xfs_is_inode32(pag_mount(pag))) {
 246		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 247		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 248		return false;
 249	}
 250
 251	if (ino > XFS_MAXINUMBER_32) {
 252		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 253		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 254		return false;
 255	}
 256
 257	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 258	if (pag_agno(pag) < max_metadata)
 259		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 260	else
 261		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 262	return true;
 263}
 264
 265/*
 266 * Set parameters for inode allocation heuristics, taking into account
 267 * filesystem size and inode32/inode64 mount options; i.e. specifically
 268 * whether or not XFS_FEAT_SMALL_INUMS is set.
 269 *
 270 * Inode allocation patterns are altered only if inode32 is requested
 271 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 272 * If altered, XFS_OPSTATE_INODE32 is set as well.
 273 *
 274 * An agcount independent of that in the mount structure is provided
 275 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 276 * to the potentially higher ag count.
 277 *
 278 * Returns the maximum AG index which may contain inodes.
 279 */
 280xfs_agnumber_t
 281xfs_set_inode_alloc(
 282	struct xfs_mount *mp,
 283	xfs_agnumber_t	agcount)
 284{
 285	xfs_agnumber_t	index;
 286	xfs_agnumber_t	maxagi = 0;
 287	xfs_sb_t	*sbp = &mp->m_sb;
 288	xfs_agnumber_t	max_metadata;
 289	xfs_agino_t	agino;
 290	xfs_ino_t	ino;
 291
 292	/*
 293	 * Calculate how much should be reserved for inodes to meet
 294	 * the max inode percentage.  Used only for inode32.
 295	 */
 296	if (M_IGEO(mp)->maxicount) {
 297		uint64_t	icount;
 298
 299		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 300		do_div(icount, 100);
 301		icount += sbp->sb_agblocks - 1;
 302		do_div(icount, sbp->sb_agblocks);
 303		max_metadata = icount;
 304	} else {
 305		max_metadata = agcount;
 306	}
 307
 308	/* Get the last possible inode in the filesystem */
 309	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
 310	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 311
 312	/*
 313	 * If user asked for no more than 32-bit inodes, and the fs is
 314	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
 315	 * the allocator to accommodate the request.
 316	 */
 317	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
 318		xfs_set_inode32(mp);
 319	else
 320		xfs_clear_inode32(mp);
 321
 322	for (index = 0; index < agcount; index++) {
 323		struct xfs_perag	*pag;
 324
 325		ino = XFS_AGINO_TO_INO(mp, index, agino);
 326
 327		pag = xfs_perag_get(mp, index);
 328		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
 329			maxagi++;
 330		xfs_perag_put(pag);
 331	}
 332
 333	return xfs_is_inode32(mp) ? maxagi : agcount;
 334}
 335
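/*
 * Sanity check a "dax=always" request: fall back to dax=never when no device
 * supports DAX or the block size is not PAGE_SIZE, and refuse to combine DAX
 * with reflink on a partitioned data device.
 */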
 336static int
 337xfs_setup_dax_always(
 338	struct xfs_mount	*mp)
 339{
 340	if (!mp->m_ddev_targp->bt_daxdev &&
 341	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
 342		xfs_alert(mp,
 343			"DAX unsupported by block device. Turning off DAX.");
 344		goto disable_dax;
 345	}
 346
 347	if (mp->m_super->s_blocksize != PAGE_SIZE) {
 348		xfs_alert(mp,
 349			"DAX not supported for blocksize. Turning off DAX.");
 350		goto disable_dax;
 351	}
 352
 353	if (xfs_has_reflink(mp) &&
 354	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
 355		xfs_alert(mp,
 356			"DAX and reflink cannot work with multi-partitions!");
 357		return -EINVAL;
 358	}
 359
 360	return 0;
 361
 362disable_dax:
 363	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
 364	return 0;
 365}
 366
 367STATIC int
 368xfs_blkdev_get(
 369	xfs_mount_t		*mp,
 370	const char		*name,
 371	struct file		**bdev_filep)
 372{
 373	int			error = 0;
 374
 375	*bdev_filep = bdev_file_open_by_path(name,
 376		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
 377		mp->m_super, &fs_holder_ops);
 378	if (IS_ERR(*bdev_filep)) {
 379		error = PTR_ERR(*bdev_filep);
 380		*bdev_filep = NULL;
 381		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 382	}
 383
 384	return error;
 385}
 386
 387STATIC void
 388xfs_shutdown_devices(
 389	struct xfs_mount	*mp)
 390{
 391	/*
 392	 * Udev is triggered whenever anyone closes a block device or unmounts
  393	 * a file system on a block device.
 394	 * The default udev rules invoke blkid to read the fs super and create
 395	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
 396	 * reads through the page cache.
 397	 *
 398	 * xfs_db also uses buffered reads to examine metadata.  There is no
 399	 * coordination between xfs_db and udev, which means that they can run
 400	 * concurrently.  Note there is no coordination between the kernel and
 401	 * blkid either.
 402	 *
 403	 * On a system with 64k pages, the page cache can cache the superblock
 404	 * and the root inode (and hence the root directory) with the same 64k
 405	 * page.  If udev spawns blkid after the mkfs and the system is busy
 406	 * enough that it is still running when xfs_db starts up, they'll both
 407	 * read from the same page in the pagecache.
 408	 *
 409	 * The unmount writes updated inode metadata to disk directly.  The XFS
 410	 * buffer cache does not use the bdev pagecache, so it needs to
 411	 * invalidate that pagecache on unmount.  If the above scenario occurs,
 412	 * the pagecache no longer reflects what's on disk, xfs_db reads the
 413	 * stale metadata, and fails to find /a.  Most of the time this succeeds
 414	 * because closing a bdev invalidates the page cache, but when processes
 415	 * race, everyone loses.
 416	 */
 417	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 418		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
 419		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
 420	}
 421	if (mp->m_rtdev_targp) {
 422		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
 423		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
 424	}
 425	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
 426	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
 427}
 428
 429/*
 430 * The file system configurations are:
 431 *	(1) device (partition) with data and internal log
 432 *	(2) logical volume with data and log subvolumes.
 433 *	(3) logical volume with data, log, and realtime subvolumes.
 434 *
 435 * We only have to handle opening the log and realtime volumes here if
 436 * they are present.  The data subvolume has already been opened by
 437 * get_sb_bdev() and is stored in sb->s_bdev.
 438 */
 439STATIC int
 440xfs_open_devices(
 441	struct xfs_mount	*mp)
 442{
 443	struct super_block	*sb = mp->m_super;
 444	struct block_device	*ddev = sb->s_bdev;
 445	struct file		*logdev_file = NULL, *rtdev_file = NULL;
 
 446	int			error;
 447
 448	/*
 449	 * Open real time and log devices - order is important.
 450	 */
 451	if (mp->m_logname) {
 452		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
 453		if (error)
 454			return error;
 
 455	}
 456
 457	if (mp->m_rtname) {
 458		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
 459		if (error)
 460			goto out_close_logdev;
 461
 462		if (file_bdev(rtdev_file) == ddev ||
 463		    (logdev_file &&
 464		     file_bdev(rtdev_file) == file_bdev(logdev_file))) {
 465			xfs_warn(mp,
 466	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 467			error = -EINVAL;
 468			goto out_close_rtdev;
 469		}
 
 470	}
 471
 472	/*
 473	 * Setup xfs_mount buffer target pointers
 474	 */
 475	error = -ENOMEM;
 476	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
 477	if (!mp->m_ddev_targp)
 478		goto out_close_rtdev;
 479
 480	if (rtdev_file) {
 481		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
 482		if (!mp->m_rtdev_targp)
 483			goto out_free_ddev_targ;
 484	}
 485
 486	if (logdev_file && file_bdev(logdev_file) != ddev) {
 487		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
 488		if (!mp->m_logdev_targp)
 489			goto out_free_rtdev_targ;
 490	} else {
 491		mp->m_logdev_targp = mp->m_ddev_targp;
 492		/* Handle won't be used, drop it */
 493		if (logdev_file)
 494			bdev_fput(logdev_file);
 495	}
 496
 497	return 0;
 498
 499 out_free_rtdev_targ:
 500	if (mp->m_rtdev_targp)
 501		xfs_free_buftarg(mp->m_rtdev_targp);
 502 out_free_ddev_targ:
 503	xfs_free_buftarg(mp->m_ddev_targp);
 504 out_close_rtdev:
 505	 if (rtdev_file)
 506		bdev_fput(rtdev_file);
 507 out_close_logdev:
 508	if (logdev_file)
 509		bdev_fput(logdev_file);
 510	return error;
 511}
 512
 513/*
 514 * Setup xfs_mount buffer target pointers based on superblock
 515 */
 516STATIC int
 517xfs_setup_devices(
 518	struct xfs_mount	*mp)
 519{
 520	int			error;
 521
 522	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 523	if (error)
 524		return error;
 525
 526	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 527		unsigned int	log_sector_size = BBSIZE;
 528
 529		if (xfs_has_sector(mp))
 530			log_sector_size = mp->m_sb.sb_logsectsize;
 531		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 532					    log_sector_size);
 533		if (error)
 534			return error;
 535	}
 536	if (mp->m_rtdev_targp) {
 537		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 538					    mp->m_sb.sb_sectsize);
 539		if (error)
 540			return error;
 541	}
 542
 543	return 0;
 544}
 545
 546STATIC int
 547xfs_init_mount_workqueues(
 548	struct xfs_mount	*mp)
 549{
 550	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
 551			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 552			1, mp->m_super->s_id);
 553	if (!mp->m_buf_workqueue)
 554		goto out;
 555
 556	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
 557			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 558			0, mp->m_super->s_id);
 559	if (!mp->m_unwritten_workqueue)
 560		goto out_destroy_buf;
 561
 562	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
 563			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 564			0, mp->m_super->s_id);
 565	if (!mp->m_reclaim_workqueue)
 566		goto out_destroy_unwritten;
 567
 568	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
 569			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
 570			0, mp->m_super->s_id);
 571	if (!mp->m_blockgc_wq)
 572		goto out_destroy_reclaim;
 573
 574	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
 575			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
 576			1, mp->m_super->s_id);
 577	if (!mp->m_inodegc_wq)
 578		goto out_destroy_blockgc;
 579
 580	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
 581			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
 582	if (!mp->m_sync_workqueue)
 583		goto out_destroy_inodegc;
 584
 585	return 0;
 586
 587out_destroy_inodegc:
 588	destroy_workqueue(mp->m_inodegc_wq);
 589out_destroy_blockgc:
 590	destroy_workqueue(mp->m_blockgc_wq);
 591out_destroy_reclaim:
 592	destroy_workqueue(mp->m_reclaim_workqueue);
 593out_destroy_unwritten:
 594	destroy_workqueue(mp->m_unwritten_workqueue);
 595out_destroy_buf:
 596	destroy_workqueue(mp->m_buf_workqueue);
 597out:
 598	return -ENOMEM;
 599}
 600
 601STATIC void
 602xfs_destroy_mount_workqueues(
 603	struct xfs_mount	*mp)
 604{
 605	destroy_workqueue(mp->m_sync_workqueue);
 606	destroy_workqueue(mp->m_blockgc_wq);
 607	destroy_workqueue(mp->m_inodegc_wq);
 608	destroy_workqueue(mp->m_reclaim_workqueue);
 
 609	destroy_workqueue(mp->m_unwritten_workqueue);
 610	destroy_workqueue(mp->m_buf_workqueue);
 611}
 612
 613static void
 614xfs_flush_inodes_worker(
 615	struct work_struct	*work)
 616{
 617	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
 618						   m_flush_inodes_work);
 619	struct super_block	*sb = mp->m_super;
 620
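	/* Only flush if ->s_umount can be taken without blocking. */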
 621	if (down_read_trylock(&sb->s_umount)) {
 622		sync_inodes_sb(sb);
 623		up_read(&sb->s_umount);
 624	}
 625}
 626
 627/*
 628 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 629 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 630 * for IO to complete so that we effectively throttle multiple callers to the
 631 * rate at which IO is completing.
 632 */
 633void
 634xfs_flush_inodes(
 635	struct xfs_mount	*mp)
 636{
 637	/*
 638	 * If flush_work() returns true then that means we waited for a flush
 639	 * which was already in progress.  Don't bother running another scan.
 640	 */
 641	if (flush_work(&mp->m_flush_inodes_work))
 642		return;
 643
 644	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
 645	flush_work(&mp->m_flush_inodes_work);
 646}
 647
 648/* Catch misguided souls that try to use this interface on XFS */
 649STATIC struct inode *
 650xfs_fs_alloc_inode(
 651	struct super_block	*sb)
 652{
 653	BUG();
 654	return NULL;
 655}
 656
 657/*
 658 * Now that the generic code is guaranteed not to be accessing
 659 * the linux inode, we can inactivate and reclaim the inode.
 660 */
 661STATIC void
 662xfs_fs_destroy_inode(
 663	struct inode		*inode)
 664{
 665	struct xfs_inode	*ip = XFS_I(inode);
 666
 667	trace_xfs_destroy_inode(ip);
 668
 669	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 670	XFS_STATS_INC(ip->i_mount, vn_rele);
 671	XFS_STATS_INC(ip->i_mount, vn_remove);
 672	xfs_inode_mark_reclaimable(ip);
 673}
 674
 675static void
 676xfs_fs_dirty_inode(
 677	struct inode			*inode,
 678	int				flags)
 679{
 680	struct xfs_inode		*ip = XFS_I(inode);
 681	struct xfs_mount		*mp = ip->i_mount;
 682	struct xfs_trans		*tp;
 683
 684	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
 685		return;
 686
 687	/*
 688	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
 689	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
 690	 * in flags possibly together with I_DIRTY_SYNC.
 691	 */
 692	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
 693		return;
 694
 695	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
 696		return;
 697	xfs_ilock(ip, XFS_ILOCK_EXCL);
 698	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 699	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 700	xfs_trans_commit(tp);
 701}
 702
 703/*
 704 * Slab object creation initialisation for the XFS inode.
 705 * This covers only the idempotent fields in the XFS inode;
 706 * all other fields need to be initialised on allocation
 707 * from the slab. This avoids the need to repeatedly initialise
 708 * fields in the xfs inode that left in the initialise state
  709 * fields in the xfs inode that are left in the initialised state
  710 * when the inode is freed.
 711STATIC void
 712xfs_fs_inode_init_once(
 713	void			*inode)
 714{
 715	struct xfs_inode	*ip = inode;
 716
 717	memset(ip, 0, sizeof(struct xfs_inode));
 718
 719	/* vfs inode */
 720	inode_init_once(VFS_I(ip));
 721
 722	/* xfs inode */
 723	atomic_set(&ip->i_pincount, 0);
 724	spin_lock_init(&ip->i_flags_lock);
 725	init_rwsem(&ip->i_lock);
 726}
 727
 728/*
 729 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 730 * serialised against cache hits here via the inode->i_lock and igrab() in
 731 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 732 * racing with us, and it avoids needing to grab a spinlock here for every inode
 733 * we drop the final reference on.
 734 */
 735STATIC int
 736xfs_fs_drop_inode(
 737	struct inode		*inode)
 738{
 739	struct xfs_inode	*ip = XFS_I(inode);
 740
 741	/*
 742	 * If this unlinked inode is in the middle of recovery, don't
 743	 * drop the inode just yet; log recovery will take care of
 744	 * that.  See the comment for this inode flag.
 745	 */
 746	if (ip->i_flags & XFS_IRECOVERY) {
 747		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
 748		return 0;
 749	}
 750
 751	return generic_drop_inode(inode);
 752}
 753
 754static void
 755xfs_mount_free(
 756	struct xfs_mount	*mp)
 757{
 758	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
 759		xfs_free_buftarg(mp->m_logdev_targp);
 760	if (mp->m_rtdev_targp)
 761		xfs_free_buftarg(mp->m_rtdev_targp);
 762	if (mp->m_ddev_targp)
 763		xfs_free_buftarg(mp->m_ddev_targp);
 764
 765	debugfs_remove(mp->m_debugfs);
 766	kfree(mp->m_rtname);
 767	kfree(mp->m_logname);
 768	kfree(mp);
 769}
 770
 771STATIC int
 772xfs_fs_sync_fs(
 773	struct super_block	*sb,
 774	int			wait)
 775{
 776	struct xfs_mount	*mp = XFS_M(sb);
 777	int			error;
 778
 779	trace_xfs_fs_sync_fs(mp, __return_address);
 780
 781	/*
 782	 * Doing anything during the async pass would be counterproductive.
 783	 */
 784	if (!wait)
 785		return 0;
 786
 787	error = xfs_log_force(mp, XFS_LOG_SYNC);
 788	if (error)
 789		return error;
 790
 791	if (laptop_mode) {
 792		/*
 793		 * The disk must be active because we're syncing.
 794		 * We schedule log work now (now that the disk is
 795		 * active) instead of later (when it might not be).
 796		 */
 797		flush_delayed_work(&mp->m_log->l_work);
 798	}
 799
 800	/*
 801	 * If we are called with page faults frozen out, it means we are about
 802	 * to freeze the transaction subsystem. Take the opportunity to shut
 803	 * down inodegc because once SB_FREEZE_FS is set it's too late to
 804	 * prevent inactivation races with freeze. The fs doesn't get called
 805	 * again by the freezing process until after SB_FREEZE_FS has been set,
 806	 * so it's now or never.  Same logic applies to speculative allocation
 807	 * garbage collection.
 808	 *
 809	 * We don't care if this is a normal syncfs call that does this or
 810	 * freeze that does this - we can run this multiple times without issue
 811	 * and we won't race with a restart because a restart can only occur
 812	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
 813	 */
 814	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
 815		xfs_inodegc_stop(mp);
 816		xfs_blockgc_stop(mp);
 817	}
 818
 819	return 0;
 820}
 821
 822STATIC int
 823xfs_fs_statfs(
 824	struct dentry		*dentry,
 825	struct kstatfs		*statp)
 826{
 827	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
 828	xfs_sb_t		*sbp = &mp->m_sb;
 829	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 830	uint64_t		fakeinos, id;
 831	uint64_t		icount;
 832	uint64_t		ifree;
 833	uint64_t		fdblocks;
 834	xfs_extlen_t		lsize;
 835	int64_t			ffree;
 836
 837	/*
 838	 * Expedite background inodegc but don't wait. We do not want to block
 839	 * here waiting hours for a billion extent file to be truncated.
 840	 */
 841	xfs_inodegc_push(mp);
 842
 843	statp->f_type = XFS_SUPER_MAGIC;
 844	statp->f_namelen = MAXNAMELEN - 1;
 845
 846	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
 847	statp->f_fsid = u64_to_fsid(id);
 
 848
 849	icount = percpu_counter_sum(&mp->m_icount);
 850	ifree = percpu_counter_sum(&mp->m_ifree);
 851	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
 852
 853	spin_lock(&mp->m_sb_lock);
 854	statp->f_bsize = sbp->sb_blocksize;
 855	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 856	statp->f_blocks = sbp->sb_dblocks - lsize;
 857	spin_unlock(&mp->m_sb_lock);
 858
 859	/* make sure statp->f_bfree does not underflow */
 860	statp->f_bfree = max_t(int64_t, 0,
 861				fdblocks - xfs_fdblocks_unavailable(mp));
 862	statp->f_bavail = statp->f_bfree;
 863
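	/*
	 * Estimate how many more inodes could be created from the remaining
	 * free space, then cap f_files at the filesystem-wide limits.
	 */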
 864	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
 865	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
 866	if (M_IGEO(mp)->maxicount)
 867		statp->f_files = min_t(typeof(statp->f_files),
 868					statp->f_files,
 869					M_IGEO(mp)->maxicount);
 870
 871	/* If sb_icount overshot maxicount, report actual allocation */
 872	statp->f_files = max_t(typeof(statp->f_files),
 873					statp->f_files,
 874					sbp->sb_icount);
 875
 876	/* make sure statp->f_ffree does not underflow */
 877	ffree = statp->f_files - (icount - ifree);
 878	statp->f_ffree = max_t(int64_t, ffree, 0);
 879
 880	if (XFS_IS_REALTIME_MOUNT(mp) &&
 881	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
 882		s64	freertx;
 883
 884		statp->f_blocks = sbp->sb_rblocks;
 885		freertx = percpu_counter_sum_positive(&mp->m_frextents);
 886		statp->f_bavail = statp->f_bfree =
 887			xfs_rtbxlen_to_blen(mp, freertx);
 888	}
 889
 890	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
 891	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
 892			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
 893		xfs_qm_statvfs(ip, statp);
 894
 895	return 0;
 896}
 897
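/*
 * Hand the reserved block pool back (reserve zero blocks), remembering the
 * old reservation so xfs_restore_resvblks() can re-establish it later, e.g.
 * when the filesystem is thawed.
 */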
 898STATIC void
 899xfs_save_resvblks(struct xfs_mount *mp)
 900{
 901	mp->m_resblks_save = mp->m_resblks;
 902	xfs_reserve_blocks(mp, 0);
 903}
 904
 905STATIC void
 906xfs_restore_resvblks(struct xfs_mount *mp)
 907{
 908	uint64_t resblks;
 909
 910	if (mp->m_resblks_save) {
 911		resblks = mp->m_resblks_save;
 912		mp->m_resblks_save = 0;
 913	} else
 914		resblks = xfs_default_resblks(mp);
 915
 916	xfs_reserve_blocks(mp, resblks);
 917}
 918
 919/*
 920 * Second stage of a freeze. The data is already frozen so we only
 921 * need to take care of the metadata. Once that's done sync the superblock
 922 * to the log to dirty it in case of a crash while frozen. This ensures that we
 923 * will recover the unlinked inode lists on the next mount.
 924 */
 925STATIC int
 926xfs_fs_freeze(
 927	struct super_block	*sb)
 928{
 929	struct xfs_mount	*mp = XFS_M(sb);
 930	unsigned int		flags;
 931	int			ret;
 932
 933	/*
 934	 * The filesystem is now frozen far enough that memory reclaim
 935	 * cannot safely operate on the filesystem. Hence we need to
 936	 * set a GFP_NOFS context here to avoid recursion deadlocks.
 937	 */
 938	flags = memalloc_nofs_save();
 939	xfs_save_resvblks(mp);
 940	ret = xfs_log_quiesce(mp);
 941	memalloc_nofs_restore(flags);
 942
 943	/*
 944	 * For read-write filesystems, we need to restart the inodegc on error
 945	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
 946	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
 947	 * here, so we can restart safely without racing with a stop in
 948	 * xfs_fs_sync_fs().
 949	 */
 950	if (ret && !xfs_is_readonly(mp)) {
 951		xfs_blockgc_start(mp);
 952		xfs_inodegc_start(mp);
 953	}
 954
 955	return ret;
 956}
 957
 958STATIC int
 959xfs_fs_unfreeze(
 960	struct super_block	*sb)
 961{
 962	struct xfs_mount	*mp = XFS_M(sb);
 963
 964	xfs_restore_resvblks(mp);
 965	xfs_log_work_queue(mp);
 966
 967	/*
 968	 * Don't reactivate the inodegc worker on a readonly filesystem because
 969	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
 970	 * worker because there are no speculative preallocations on a readonly
 971	 * filesystem.
 972	 */
 973	if (!xfs_is_readonly(mp)) {
 974		xfs_blockgc_start(mp);
 975		xfs_inodegc_start(mp);
 976	}
 977
 978	return 0;
 979}
 980
 981/*
 982 * This function fills in xfs_mount_t fields based on mount args.
 983 * Note: the superblock _has_ now been read in.
 984 */
 985STATIC int
 986xfs_finish_flags(
 987	struct xfs_mount	*mp)
 988{
 989	/* Fail a mount where the logbuf is smaller than the log stripe */
 990	if (xfs_has_logv2(mp)) {
 991		if (mp->m_logbsize <= 0 &&
 992		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
 993			mp->m_logbsize = mp->m_sb.sb_logsunit;
 994		} else if (mp->m_logbsize > 0 &&
 995			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
 996			xfs_warn(mp,
 997		"logbuf size must be greater than or equal to log stripe size");
 998			return -EINVAL;
 999		}
1000	} else {
1001		/* Fail a mount if the logbuf is larger than 32K */
1002		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1003			xfs_warn(mp,
1004		"logbuf size for version 1 logs must be 16K or 32K");
1005			return -EINVAL;
1006		}
1007	}
1008
1009	/*
1010	 * V5 filesystems always use attr2 format for attributes.
1011	 */
1012	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
 
1013		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1014			     "attr2 is always enabled for V5 filesystems.");
1015		return -EINVAL;
1016	}
1017
1018	/*
1019	 * prohibit r/w mounts of read-only filesystems
1020	 */
1021	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1022		xfs_warn(mp,
1023			"cannot mount a read-only filesystem as read-write");
1024		return -EROFS;
1025	}
1026
1027	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1028	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1029	    !xfs_has_pquotino(mp)) {
1030		xfs_warn(mp,
1031		  "Super block does not support project and group quota together");
1032		return -EINVAL;
1033	}
1034
1035	return 0;
1036}
1037
1038static int
1039xfs_init_percpu_counters(
1040	struct xfs_mount	*mp)
1041{
1042	int		error;
1043
1044	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1045	if (error)
1046		return -ENOMEM;
1047
1048	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1049	if (error)
1050		goto free_icount;
1051
1052	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1053	if (error)
1054		goto free_ifree;
1055
1056	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1057	if (error)
1058		goto free_fdblocks;
1059
1060	error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
1061	if (error)
1062		goto free_delalloc;
1063
1064	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1065	if (error)
1066		goto free_delalloc_rt;
1067
1068	return 0;
1069
1070free_delalloc_rt:
1071	percpu_counter_destroy(&mp->m_delalloc_rtextents);
1072free_delalloc:
1073	percpu_counter_destroy(&mp->m_delalloc_blks);
1074free_fdblocks:
1075	percpu_counter_destroy(&mp->m_fdblocks);
1076free_ifree:
1077	percpu_counter_destroy(&mp->m_ifree);
1078free_icount:
1079	percpu_counter_destroy(&mp->m_icount);
1080	return -ENOMEM;
1081}
1082
1083void
1084xfs_reinit_percpu_counters(
1085	struct xfs_mount	*mp)
1086{
1087	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1088	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1089	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1090	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1091}
1092
1093static void
1094xfs_destroy_percpu_counters(
1095	struct xfs_mount	*mp)
1096{
1097	percpu_counter_destroy(&mp->m_icount);
1098	percpu_counter_destroy(&mp->m_ifree);
1099	percpu_counter_destroy(&mp->m_fdblocks);
1100	ASSERT(xfs_is_shutdown(mp) ||
1101	       percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
1102	percpu_counter_destroy(&mp->m_delalloc_rtextents);
1103	ASSERT(xfs_is_shutdown(mp) ||
1104	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1105	percpu_counter_destroy(&mp->m_delalloc_blks);
1106	percpu_counter_destroy(&mp->m_frextents);
1107}
1108
1109static int
1110xfs_inodegc_init_percpu(
1111	struct xfs_mount	*mp)
1112{
1113	struct xfs_inodegc	*gc;
1114	int			cpu;
1115
1116	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1117	if (!mp->m_inodegc)
1118		return -ENOMEM;
1119
1120	for_each_possible_cpu(cpu) {
1121		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1122		gc->cpu = cpu;
1123		gc->mp = mp;
1124		init_llist_head(&gc->list);
1125		gc->items = 0;
1126		gc->error = 0;
1127		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1128	}
1129	return 0;
1130}
1131
1132static void
1133xfs_inodegc_free_percpu(
1134	struct xfs_mount	*mp)
1135{
1136	if (!mp->m_inodegc)
1137		return;
1138	free_percpu(mp->m_inodegc);
1139}
1140
1141static void
1142xfs_fs_put_super(
1143	struct super_block	*sb)
1144{
1145	struct xfs_mount	*mp = XFS_M(sb);
1146
1147	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1148	xfs_filestream_unmount(mp);
1149	xfs_unmountfs(mp);
1150
1151	xfs_rtmount_freesb(mp);
1152	xfs_freesb(mp);
1153	xchk_mount_stats_free(mp);
1154	free_percpu(mp->m_stats.xs_stats);
1155	xfs_inodegc_free_percpu(mp);
1156	xfs_destroy_percpu_counters(mp);
1157	xfs_destroy_mount_workqueues(mp);
1158	xfs_shutdown_devices(mp);
1159}
1160
1161static long
1162xfs_fs_nr_cached_objects(
1163	struct super_block	*sb,
1164	struct shrink_control	*sc)
1165{
1166	/* Paranoia: catch incorrect calls during mount setup or teardown */
1167	if (WARN_ON_ONCE(!sb->s_fs_info))
1168		return 0;
1169	return xfs_reclaim_inodes_count(XFS_M(sb));
1170}
1171
1172static long
1173xfs_fs_free_cached_objects(
1174	struct super_block	*sb,
1175	struct shrink_control	*sc)
1176{
1177	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1178}
1179
1180static void
1181xfs_fs_shutdown(
1182	struct super_block	*sb)
1183{
1184	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1185}
1186
1187static const struct super_operations xfs_super_operations = {
1188	.alloc_inode		= xfs_fs_alloc_inode,
1189	.destroy_inode		= xfs_fs_destroy_inode,
1190	.dirty_inode		= xfs_fs_dirty_inode,
1191	.drop_inode		= xfs_fs_drop_inode,
1192	.put_super		= xfs_fs_put_super,
1193	.sync_fs		= xfs_fs_sync_fs,
1194	.freeze_fs		= xfs_fs_freeze,
1195	.unfreeze_fs		= xfs_fs_unfreeze,
1196	.statfs			= xfs_fs_statfs,
1197	.show_options		= xfs_fs_show_options,
1198	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1199	.free_cached_objects	= xfs_fs_free_cached_objects,
1200	.shutdown		= xfs_fs_shutdown,
1201};
1202
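/*
 * Parse an integer with an optional K/M/G binary suffix, e.g. "64k" -> 65536.
 */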
1203static int
1204suffix_kstrtoint(
1205	const char	*s,
1206	unsigned int	base,
1207	int		*res)
1208{
1209	int		last, shift_left_factor = 0, _res;
1210	char		*value;
1211	int		ret = 0;
1212
1213	value = kstrdup(s, GFP_KERNEL);
1214	if (!value)
1215		return -ENOMEM;
1216
1217	last = strlen(value) - 1;
1218	if (value[last] == 'K' || value[last] == 'k') {
1219		shift_left_factor = 10;
1220		value[last] = '\0';
1221	}
1222	if (value[last] == 'M' || value[last] == 'm') {
1223		shift_left_factor = 20;
1224		value[last] = '\0';
1225	}
1226	if (value[last] == 'G' || value[last] == 'g') {
1227		shift_left_factor = 30;
1228		value[last] = '\0';
1229	}
1230
1231	if (kstrtoint(value, base, &_res))
1232		ret = -EINVAL;
1233	kfree(value);
1234	*res = _res << shift_left_factor;
1235	return ret;
1236}
1237
1238static inline void
1239xfs_fs_warn_deprecated(
1240	struct fs_context	*fc,
1241	struct fs_parameter	*param,
1242	uint64_t		flag,
1243	bool			value)
1244{
1245	/* Don't print the warning if reconfiguring and current mount point
1246	 * already had the flag set
1247	 */
1248	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1249            !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1250		return;
1251	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1252}
1253
1254/*
1255 * Set mount state from a mount option.
1256 *
1257 * NOTE: mp->m_super is NULL here!
1258 */
1259static int
1260xfs_fs_parse_param(
1261	struct fs_context	*fc,
1262	struct fs_parameter	*param)
1263{
1264	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1265	struct fs_parse_result	result;
1266	int			size = 0;
1267	int			opt;
1268
1269	BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL);
1270
1271	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1272	if (opt < 0)
1273		return opt;
1274
1275	switch (opt) {
1276	case Opt_logbufs:
1277		parsing_mp->m_logbufs = result.uint_32;
1278		return 0;
1279	case Opt_logbsize:
1280		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1281			return -EINVAL;
1282		return 0;
1283	case Opt_logdev:
1284		kfree(parsing_mp->m_logname);
1285		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1286		if (!parsing_mp->m_logname)
1287			return -ENOMEM;
1288		return 0;
1289	case Opt_rtdev:
1290		kfree(parsing_mp->m_rtname);
1291		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1292		if (!parsing_mp->m_rtname)
1293			return -ENOMEM;
1294		return 0;
1295	case Opt_allocsize:
1296		if (suffix_kstrtoint(param->string, 10, &size))
1297			return -EINVAL;
1298		parsing_mp->m_allocsize_log = ffs(size) - 1;
1299		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1300		return 0;
1301	case Opt_grpid:
1302	case Opt_bsdgroups:
1303		parsing_mp->m_features |= XFS_FEAT_GRPID;
1304		return 0;
1305	case Opt_nogrpid:
1306	case Opt_sysvgroups:
1307		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1308		return 0;
1309	case Opt_wsync:
1310		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1311		return 0;
1312	case Opt_norecovery:
1313		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1314		return 0;
1315	case Opt_noalign:
1316		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1317		return 0;
1318	case Opt_swalloc:
1319		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1320		return 0;
1321	case Opt_sunit:
1322		parsing_mp->m_dalign = result.uint_32;
1323		return 0;
1324	case Opt_swidth:
1325		parsing_mp->m_swidth = result.uint_32;
1326		return 0;
1327	case Opt_inode32:
1328		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1329		return 0;
1330	case Opt_inode64:
1331		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1332		return 0;
1333	case Opt_nouuid:
1334		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1335		return 0;
1336	case Opt_largeio:
1337		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1338		return 0;
1339	case Opt_nolargeio:
1340		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1341		return 0;
1342	case Opt_filestreams:
1343		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1344		return 0;
1345	case Opt_noquota:
1346		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1347		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1348		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1349		return 0;
1350	case Opt_quota:
1351	case Opt_uquota:
1352	case Opt_usrquota:
1353		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1354		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1355		return 0;
1356	case Opt_qnoenforce:
1357	case Opt_uqnoenforce:
1358		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1359		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1360		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1361		return 0;
1362	case Opt_pquota:
1363	case Opt_prjquota:
1364		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1365		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1366		return 0;
1367	case Opt_pqnoenforce:
1368		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1369		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1370		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1371		return 0;
1372	case Opt_gquota:
1373	case Opt_grpquota:
1374		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1375		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1376		return 0;
1377	case Opt_gqnoenforce:
1378		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1379		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1380		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1381		return 0;
1382	case Opt_discard:
1383		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1384		return 0;
1385	case Opt_nodiscard:
1386		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1387		return 0;
1388#ifdef CONFIG_FS_DAX
1389	case Opt_dax:
1390		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1391		return 0;
1392	case Opt_dax_enum:
1393		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1394		return 0;
1395#endif
1396	/* Following mount options will be removed in September 2025 */
1397	case Opt_ikeep:
1398		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1399		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1400		return 0;
1401	case Opt_noikeep:
1402		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1403		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1404		return 0;
1405	case Opt_attr2:
1406		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1407		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1408		return 0;
1409	case Opt_noattr2:
1410		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1411		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1412		return 0;
1413	default:
1414		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1415		return -EINVAL;
1416	}
1417
1418	return 0;
1419}
1420
1421static int
1422xfs_fs_validate_params(
1423	struct xfs_mount	*mp)
1424{
1425	/* No recovery flag requires a read-only mount */
1426	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1427		xfs_warn(mp, "no-recovery mounts must be read-only.");
1428		return -EINVAL;
1429	}
1430
1431	/*
1432	 * We have not read the superblock at this point, so only the attr2
1433	 * mount option can set the attr2 feature by this stage.
1434	 */
1435	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1436		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1437		return -EINVAL;
1438	}
1439
1440
1441	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1442		xfs_warn(mp,
1443	"sunit and swidth options incompatible with the noalign option");
1444		return -EINVAL;
1445	}
1446
1447	if (!IS_ENABLED(CONFIG_XFS_QUOTA) &&
1448	    (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) {
1449		xfs_warn(mp, "quota support not available in this kernel.");
1450		return -EINVAL;
1451	}
1452
1453	if ((mp->m_dalign && !mp->m_swidth) ||
1454	    (!mp->m_dalign && mp->m_swidth)) {
1455		xfs_warn(mp, "sunit and swidth must be specified together");
1456		return -EINVAL;
1457	}
1458
1459	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1460		xfs_warn(mp,
1461	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1462			mp->m_swidth, mp->m_dalign);
1463		return -EINVAL;
1464	}
1465
1466	if (mp->m_logbufs != -1 &&
1467	    mp->m_logbufs != 0 &&
1468	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1469	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1470		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1471			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1472		return -EINVAL;
1473	}
1474
1475	if (mp->m_logbsize != -1 &&
1476	    mp->m_logbsize !=  0 &&
1477	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1478	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1479	     !is_power_of_2(mp->m_logbsize))) {
1480		xfs_warn(mp,
1481			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1482			mp->m_logbsize);
1483		return -EINVAL;
1484	}
1485
1486	if (xfs_has_allocsize(mp) &&
1487	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1488	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1489		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1490			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1491		return -EINVAL;
1492	}
1493
1494	return 0;
1495}
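
/*
 * For illustration only (hypothetical device and values, not taken from the
 * XFS admin documentation): an option set such as
 *
 *	mount -o logbufs=8,logbsize=256k,sunit=64,swidth=256 /dev/sdb1 /mnt
 *
 * passes the checks above because logbufs lies within
 * [XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS], logbsize is a power of two inside the
 * log record buffer limits, and swidth is a non-zero multiple of sunit.
 * Specifying sunit without swidth, or a swidth that is not a multiple of
 * sunit, is rejected by the same function.
 */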
1496
1497struct dentry *
1498xfs_debugfs_mkdir(
1499	const char	*name,
1500	struct dentry	*parent)
1501{
1502	struct dentry	*child;
1503
1504	/* debugfs_create_dir() errors are meant to be ignored; return NULL so callers simply get no debugfs directory */
1505	child = debugfs_create_dir(name, parent);
1506	if (IS_ERR(child))
1507		return NULL;
1508
1509	return child;
1510}
1511
1512static int
1513xfs_fs_fill_super(
1514	struct super_block	*sb,
1515	struct fs_context	*fc)
1516{
1517	struct xfs_mount	*mp = sb->s_fs_info;
1518	struct inode		*root;
1519	int			flags = 0, error;
1520
1521	mp->m_super = sb;
1522
1523	/*
1524	 * Copy VFS mount flags from the context now that all parameter parsing
1525	 * is guaranteed to have been completed by either the old mount API or
1526	 * the newer fsopen/fsconfig API.
1527	 */
1528	if (fc->sb_flags & SB_RDONLY)
1529		xfs_set_readonly(mp);
1530	if (fc->sb_flags & SB_DIRSYNC)
1531		mp->m_features |= XFS_FEAT_DIRSYNC;
1532	if (fc->sb_flags & SB_SYNCHRONOUS)
1533		mp->m_features |= XFS_FEAT_WSYNC;
1534
1535	error = xfs_fs_validate_params(mp);
1536	if (error)
1537		return error;
1538
1539	sb_min_blocksize(sb, BBSIZE);
1540	sb->s_xattr = xfs_xattr_handlers;
1541	sb->s_export_op = &xfs_export_operations;
1542#ifdef CONFIG_XFS_QUOTA
1543	sb->s_qcop = &xfs_quotactl_operations;
1544	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1545#endif
1546	sb->s_op = &xfs_super_operations;
1547
1548	/*
1549	 * Delay mount work if the debug hook is set. This is debug
1550	 * instrumentation to coordinate simulation of xfs mount failures with
1551	 * VFS superblock operations.
1552	 */
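	/*
	 * (xfs_globals.mount_delay is a debug-only tunable; on DEBUG kernels
	 * it is expected to be exposed through the global debug sysfs
	 * attributes, e.g. /sys/fs/xfs/debug/mount_delay -- the exact path is
	 * an assumption here rather than something this file establishes.)
	 */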
1553	if (xfs_globals.mount_delay) {
1554		xfs_notice(mp, "Delaying mount for %d seconds.",
1555			xfs_globals.mount_delay);
1556		msleep(xfs_globals.mount_delay * 1000);
1557	}
1558
1559	if (fc->sb_flags & SB_SILENT)
1560		flags |= XFS_MFSI_QUIET;
1561
1562	error = xfs_open_devices(mp);
1563	if (error)
1564		return error;
1565
1566	if (xfs_debugfs) {
1567		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1568						  xfs_debugfs);
1569	} else {
1570		mp->m_debugfs = NULL;
1571	}
1572
1573	error = xfs_init_mount_workqueues(mp);
1574	if (error)
1575		goto out_shutdown_devices;
1576
1577	error = xfs_init_percpu_counters(mp);
1578	if (error)
1579		goto out_destroy_workqueues;
1580
1581	error = xfs_inodegc_init_percpu(mp);
1582	if (error)
1583		goto out_destroy_counters;
1584
1585	/* Allocate stats memory before we do operations that might use it */
1586	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1587	if (!mp->m_stats.xs_stats) {
1588		error = -ENOMEM;
1589		goto out_destroy_inodegc;
1590	}
1591
1592	error = xchk_mount_stats_alloc(mp);
1593	if (error)
1594		goto out_free_stats;
1595
1596	error = xfs_readsb(mp, flags);
1597	if (error)
1598		goto out_free_scrub_stats;
1599
1600	error = xfs_finish_flags(mp);
1601	if (error)
1602		goto out_free_sb;
1603
1604	error = xfs_setup_devices(mp);
1605	if (error)
1606		goto out_free_sb;
1607
1608	/*
1609	 * V4 support is undergoing deprecation.
1610	 *
1611	 * Note: this has to use an open coded m_features check as xfs_has_crc
1612	 * always returns false for !CONFIG_XFS_SUPPORT_V4.
1613	 */
1614	if (!(mp->m_features & XFS_FEAT_CRC)) {
1615		if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
1616			xfs_warn(mp,
1617	"Deprecated V4 format (crc=0) not supported by kernel.");
1618			error = -EINVAL;
1619			goto out_free_sb;
1620		}
1621		xfs_warn_once(mp,
1622	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1623	}
1624
1625	/* ASCII case insensitivity is undergoing deprecation. */
1626	if (xfs_has_asciici(mp)) {
1627#ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1628		xfs_warn_once(mp,
1629	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1630#else
1631		xfs_warn(mp,
1632	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1633		error = -EINVAL;
1634		goto out_free_sb;
1635#endif
1636	}
1637
1638	/* Filesystem claims it needs repair, so refuse the mount. */
1639	if (xfs_has_needsrepair(mp)) {
1640		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1641		error = -EFSCORRUPTED;
1642		goto out_free_sb;
1643	}
1644
1645	/*
1646	 * Don't touch the filesystem if a user tool thinks it owns the primary
1647	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1648	 * we don't check them at all.
1649	 */
1650	if (mp->m_sb.sb_inprogress) {
1651		xfs_warn(mp, "Offline file system operation in progress!");
1652		error = -EFSCORRUPTED;
1653		goto out_free_sb;
1654	}
1655
1656	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1657		size_t max_folio_size = mapping_max_folio_size_supported();
1658
1659		if (!xfs_has_crc(mp)) {
1660			xfs_warn(mp,
1661"V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
1662				mp->m_sb.sb_blocksize, PAGE_SIZE);
1663			error = -ENOSYS;
1664			goto out_free_sb;
1665		}
1666
1667		if (mp->m_sb.sb_blocksize > max_folio_size) {
1668			xfs_warn(mp,
1669"block size (%u bytes) not supported; Only block size (%zu) or less is supported",
1670				mp->m_sb.sb_blocksize, max_folio_size);
1671			error = -ENOSYS;
1672			goto out_free_sb;
1673		}
1674
1675		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS);
1676	}
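	/*
	 * In short: a filesystem block size larger than the kernel page size
	 * is only usable on a V5 (crc) filesystem, and only when the page
	 * cache can build folios at least one filesystem block in size; such
	 * mounts are still flagged as experimental large block size support.
	 */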
1677
1678	/* Ensure this filesystem fits in the page cache limits */
1679	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1680	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1681		xfs_warn(mp,
1682		"file system too large to be mounted on this system.");
1683		error = -EFBIG;
1684		goto out_free_sb;
1685	}
1686
1687	/*
1688	 * XFS block mappings use 54 bits to store the logical block offset.
1689	 * This should suffice to handle the maximum file size that the VFS
1690	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1691	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1692	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1693	 * to check this assertion.
1694	 *
1695	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1696	 * maximum pagecache offset in units of fs blocks.
1697	 */
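	/*
	 * Worked example with illustrative numbers: on a 64-bit kernel with
	 * 4k filesystem blocks, MAX_LFS_FILESIZE (2^63 - 1 bytes) converts to
	 * just under 2^51 blocks, comfortably below the 2^54 block offset
	 * limit, so the check below only fires if one of those conversions
	 * has been broken.
	 */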
1698	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1699		xfs_warn(mp,
1700"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1701			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1702			 XFS_MAX_FILEOFF);
1703		error = -EINVAL;
1704		goto out_free_sb;
1705	}
1706
1707	error = xfs_rtmount_readsb(mp);
1708	if (error)
1709		goto out_free_sb;
1710
1711	error = xfs_filestream_mount(mp);
1712	if (error)
1713		goto out_free_rtsb;
1714
1715	/*
1716	 * We must configure the block size in the superblock before we run the
1717	 * full mount process, as the mount process can look up and cache inodes.
1718	 */
1719	sb->s_magic = XFS_SUPER_MAGIC;
1720	sb->s_blocksize = mp->m_sb.sb_blocksize;
1721	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1722	sb->s_maxbytes = MAX_LFS_FILESIZE;
1723	sb->s_max_links = XFS_MAXLINK;
1724	sb->s_time_gran = 1;
1725	if (xfs_has_bigtime(mp)) {
1726		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1727		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1728	} else {
1729		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1730		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1731	}
1732	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1733	sb->s_iflags |= SB_I_CGROUPWB;
1734
1735	set_posix_acl_flag(sb);
1736
1737	/* version 5 superblocks support inode version counters. */
1738	if (xfs_has_crc(mp))
1739		sb->s_flags |= SB_I_VERSION;
1740
1741	if (xfs_has_dax_always(mp)) {
1742		error = xfs_setup_dax_always(mp);
1743		if (error)
1744			goto out_filestream_unmount;
1745	}
1746
1747	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1748		xfs_warn(mp,
1749	"mounting with \"discard\" option, but the device does not support discard");
1750		mp->m_features &= ~XFS_FEAT_DISCARD;
1751	}
1752
1753	if (xfs_has_metadir(mp))
1754		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR);
1755
1756	if (xfs_has_reflink(mp)) {
1757		if (mp->m_sb.sb_rblocks) {
1758			xfs_alert(mp,
1759	"reflink not compatible with realtime device!");
1760			error = -EINVAL;
1761			goto out_filestream_unmount;
1762		}
1763
1764		if (xfs_globals.always_cow) {
1765			xfs_info(mp, "using DEBUG-only always_cow mode.");
1766			mp->m_always_cow = true;
1767		}
1768	}
1769
1770	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1771		xfs_alert(mp,
1772	"reverse mapping btree not compatible with realtime device!");
1773		error = -EINVAL;
1774		goto out_filestream_unmount;
1775	}
1776
1777	if (xfs_has_exchange_range(mp))
1778		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE);
1779
1780	if (xfs_has_parent(mp))
1781		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_PPTR);
1782
1783	/*
1784	 * If no quota mount options were provided, try to pick up the quota
1785	 * accounting and enforcement flags from the ondisk superblock.
1786	 */
1787	if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS))
1788		xfs_set_resuming_quotaon(mp);
1789	mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
1790
1791	error = xfs_mountfs(mp);
1792	if (error)
1793		goto out_filestream_unmount;
1794
1795	root = igrab(VFS_I(mp->m_rootip));
1796	if (!root) {
1797		error = -ENOENT;
1798		goto out_unmount;
1799	}
1800	sb->s_root = d_make_root(root);
1801	if (!sb->s_root) {
1802		error = -ENOMEM;
1803		goto out_unmount;
1804	}
1805
1806	return 0;
1807
1808 out_filestream_unmount:
1809	xfs_filestream_unmount(mp);
1810 out_free_rtsb:
1811	xfs_rtmount_freesb(mp);
1812 out_free_sb:
1813	xfs_freesb(mp);
1814 out_free_scrub_stats:
1815	xchk_mount_stats_free(mp);
1816 out_free_stats:
1817	free_percpu(mp->m_stats.xs_stats);
1818 out_destroy_inodegc:
1819	xfs_inodegc_free_percpu(mp);
1820 out_destroy_counters:
1821	xfs_destroy_percpu_counters(mp);
1822 out_destroy_workqueues:
1823	xfs_destroy_mount_workqueues(mp);
1824 out_shutdown_devices:
1825	xfs_shutdown_devices(mp);
1826	return error;
1827
1828 out_unmount:
1829	xfs_filestream_unmount(mp);
1830	xfs_unmountfs(mp);
1831	goto out_free_rtsb;
1832}
1833
1834static int
1835xfs_fs_get_tree(
1836	struct fs_context	*fc)
1837{
1838	return get_tree_bdev(fc, xfs_fs_fill_super);
1839}
1840
1841static int
1842xfs_remount_rw(
1843	struct xfs_mount	*mp)
1844{
1845	struct xfs_sb		*sbp = &mp->m_sb;
1846	int error;
1847
1848	if (xfs_has_norecovery(mp)) {
1849		xfs_warn(mp,
1850			"ro->rw transition prohibited on norecovery mount");
1851		return -EINVAL;
1852	}
1853
1854	if (xfs_sb_is_v5(sbp) &&
1855	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1856		xfs_warn(mp,
1857	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1858			(sbp->sb_features_ro_compat &
1859				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1860		return -EINVAL;
1861	}
1862
1863	xfs_clear_readonly(mp);
1864
1865	/*
1866	 * If this is the first remount to a writable state, we might have some
1867	 * superblock changes to update.
1868	 */
1869	if (mp->m_update_sb) {
1870		error = xfs_sync_sb(mp, false);
1871		if (error) {
1872			xfs_warn(mp, "failed to write sb changes");
1873			return error;
1874		}
1875		mp->m_update_sb = false;
1876	}
1877
1878	/*
1879	 * Fill out the reserve pool if it is empty. Use the stashed value if
1880	 * it is non-zero, otherwise go with the default.
1881	 */
1882	xfs_restore_resvblks(mp);
1883	xfs_log_work_queue(mp);
1884	xfs_blockgc_start(mp);
1885
1886	/* Create the per-AG metadata reservation pool. */
1887	error = xfs_fs_reserve_ag_blocks(mp);
1888	if (error && error != -ENOSPC)
1889		return error;
1890
1891	/* Re-enable the background inode inactivation worker. */
1892	xfs_inodegc_start(mp);
1893
1894	return 0;
1895}
1896
1897static int
1898xfs_remount_ro(
1899	struct xfs_mount	*mp)
1900{
1901	struct xfs_icwalk	icw = {
1902		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1903	};
1904	int			error;
1905
1906	/* Flush all the dirty data to disk. */
1907	error = sync_filesystem(mp->m_super);
1908	if (error)
1909		return error;
1910
1911	/*
1912	 * Cancel background eofb scanning so it cannot race with the final
1913	 * log force+buftarg wait and deadlock the remount.
1914	 */
1915	xfs_blockgc_stop(mp);
1916
1917	/*
1918	 * Clear out all remaining COW staging extents and speculative post-EOF
1919	 * preallocations so that we don't leave inodes requiring inactivation
1920	 * cleanups during reclaim on a read-only mount.  We must process every
1921	 * cached inode, so this requires a synchronous cache scan.
1922	 */
1923	error = xfs_blockgc_free_space(mp, &icw);
1924	if (error) {
1925		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1926		return error;
1927	}
1928
1929	/*
1930	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1931	 * flushed all pending inodegc work when it sync'd the filesystem.
1932	 * The VFS holds s_umount, so we know that inodes cannot enter
1933	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1934	 * we send inodes straight to reclaim, so no inodes will be queued.
1935	 */
1936	xfs_inodegc_stop(mp);
1937
1938	/* Free the per-AG metadata reservation pool. */
1939	xfs_fs_unreserve_ag_blocks(mp);
1940
1941	/*
1942	 * Before we sync the metadata, we need to free up the reserve block
1943	 * pool so that the used block count in the superblock on disk is
1944	 * correct at the end of the remount. Stash the current reserve pool
1945	 * size so that if we get remounted rw, we can return it to the same
1946	 * size.
1947	 */
1948	xfs_save_resvblks(mp);
1949
1950	xfs_log_clean(mp);
1951	xfs_set_readonly(mp);
1952
1953	return 0;
1954}
1955
1956/*
1957 * Logically we would return an error here to prevent users from believing
1958 * they might have changed mount options using remount which can't be changed.
1959 *
1960 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1961 * arguments in some cases, so we can't blindly reject options; we would have
1962 * to check whether each specified option actually differs from the currently
1963 * set option and only reject it if that's the case.
1964 *
1965 * Until that is implemented we return success for every remount request, and
1966 * silently ignore all options that we can't actually change.
1967 */
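/*
 * As a concrete (illustrative) example: "mount -o remount,ro /mnt" arrives
 * here with SB_RDONLY set in fc->sb_flags and takes the xfs_remount_ro()
 * path below, while an XFS option merely repeated from fstab (say,
 * "logbufs=8") is parsed into the new fs_context but not applied, per the
 * policy described above.
 */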
1968static int
1969xfs_fs_reconfigure(
1970	struct fs_context *fc)
1971{
1972	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1973	struct xfs_mount        *new_mp = fc->s_fs_info;
1974	int			flags = fc->sb_flags;
1975	int			error;
1976
1977	new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
1978
1979	/* version 5 superblocks always support version counters. */
1980	if (xfs_has_crc(mp))
1981		fc->sb_flags |= SB_I_VERSION;
1982
1983	error = xfs_fs_validate_params(new_mp);
1984	if (error)
1985		return error;
1986
1987	/* inode32 -> inode64 */
1988	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1989		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1990		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1991	}
1992
1993	/* inode64 -> inode32 */
1994	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1995		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1996		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1997	}
1998
1999	/* ro -> rw */
2000	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
2001		error = xfs_remount_rw(mp);
2002		if (error)
2003			return error;
2004	}
2005
2006	/* rw -> ro */
2007	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
2008		error = xfs_remount_ro(mp);
2009		if (error)
2010			return error;
2011	}
2012
2013	return 0;
2014}
2015
2016static void
2017xfs_fs_free(
2018	struct fs_context	*fc)
2019{
2020	struct xfs_mount	*mp = fc->s_fs_info;
2021
2022	/*
2023	 * mp is stored in the fs_context when it is initialized.
2024	 * mp is transferred to the superblock on a successful mount,
2025	 * but if an error occurs before the transfer we have to free
2026	 * it here.
2027	 */
2028	if (mp)
2029		xfs_mount_free(mp);
2030}
2031
2032static const struct fs_context_operations xfs_context_ops = {
2033	.parse_param = xfs_fs_parse_param,
2034	.get_tree    = xfs_fs_get_tree,
2035	.reconfigure = xfs_fs_reconfigure,
2036	.free        = xfs_fs_free,
2037};
2038
2039/*
2040 * WARNING: do not initialise any parameters in this function that depend on
2041 * mount option parsing having already been performed, as this can be called from
2042 * fsopen() before any parameters have been set.
2043 */
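/*
 * Sketch of the fsopen()-based sequence this warning refers to (userspace
 * side; assumes a libc that exposes the new mount API wrappers, and the
 * device path is made up):
 *
 *	fd = fsopen("xfs", FSOPEN_CLOEXEC);            // xfs_init_fs_context()
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
 *	fsconfig(fd, FSCONFIG_SET_STRING, "logbufs", "8", 0);  // xfs_fs_parse_param()
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);      // xfs_fs_get_tree()
 *	mfd = fsmount(fd, FSMOUNT_CLOEXEC, 0);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * i.e. this function runs at fsopen() time, before any fsconfig() call has
 * supplied parameters.
 */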
2044static int
2045xfs_init_fs_context(
2046	struct fs_context	*fc)
2047{
2048	struct xfs_mount	*mp;
2049	int			i;
2050
2051	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
2052	if (!mp)
2053		return -ENOMEM;
2054
2055	spin_lock_init(&mp->m_sb_lock);
2056	for (i = 0; i < XG_TYPE_MAX; i++)
2057		xa_init(&mp->m_groups[i].xa);
2058	mutex_init(&mp->m_growlock);
2059	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2060	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2061	mp->m_kobj.kobject.kset = xfs_kset;
2062	/*
2063	 * We don't create the finobt per-ag space reservation until after log
2064	 * recovery, so we must set this to true so that an ifree transaction
2065	 * started during log recovery will not depend on space reservations
2066	 * for finobt expansion.
2067	 */
2068	mp->m_finobt_nores = true;
2069
2070	/*
2071	 * These can be overridden by the mount option parsing.
2072	 */
2073	mp->m_logbufs = -1;
2074	mp->m_logbsize = -1;
2075	mp->m_allocsize_log = 16; /* 64k */
2076
2077	xfs_hooks_init(&mp->m_dir_update_hooks);
2078
2079	fc->s_fs_info = mp;
2080	fc->ops = &xfs_context_ops;
2081
2082	return 0;
2083}
2084
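/*
 * .kill_sb handler: tear the superblock down through the generic block
 * device helper, then free the xfs_mount that the fs_context handed over to
 * the superblock at mount time (the pre-transfer error path is covered by
 * xfs_fs_free() above).
 */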
2085static void
2086xfs_kill_sb(
2087	struct super_block		*sb)
2088{
2089	kill_block_super(sb);
2090	xfs_mount_free(XFS_M(sb));
2091}
2092
2093static struct file_system_type xfs_fs_type = {
2094	.owner			= THIS_MODULE,
2095	.name			= "xfs",
2096	.init_fs_context	= xfs_init_fs_context,
2097	.parameters		= xfs_fs_parameters,
2098	.kill_sb		= xfs_kill_sb,
2099	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
2100};
2101MODULE_ALIAS_FS("xfs");
2102
2103STATIC int __init
2104xfs_init_caches(void)
2105{
2106	int		error;
2107
2108	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2109					 SLAB_HWCACHE_ALIGN |
2110					 SLAB_RECLAIM_ACCOUNT,
2111					 NULL);
2112	if (!xfs_buf_cache)
2113		goto out;
2114
2115	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2116						sizeof(struct xlog_ticket),
2117						0, 0, NULL);
2118	if (!xfs_log_ticket_cache)
2119		goto out_destroy_buf_cache;
2120
2121	error = xfs_btree_init_cur_caches();
2122	if (error)
2123		goto out_destroy_log_ticket_cache;
2124
2125	error = rcbagbt_init_cur_cache();
2126	if (error)
2127		goto out_destroy_btree_cur_cache;
2128
2129	error = xfs_defer_init_item_caches();
2130	if (error)
2131		goto out_destroy_rcbagbt_cur_cache;
2132
2133	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2134					      sizeof(struct xfs_da_state),
2135					      0, 0, NULL);
2136	if (!xfs_da_state_cache)
2137		goto out_destroy_defer_item_cache;
2138
2139	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2140					   sizeof(struct xfs_ifork),
2141					   0, 0, NULL);
2142	if (!xfs_ifork_cache)
2143		goto out_destroy_da_state_cache;
2144
2145	xfs_trans_cache = kmem_cache_create("xfs_trans",
2146					   sizeof(struct xfs_trans),
2147					   0, 0, NULL);
2148	if (!xfs_trans_cache)
2149		goto out_destroy_ifork_cache;
2150
2151
2152	/*
2153	 * The size of the cache-allocated buf log item is the maximum
2154	 * size possible under XFS.  This wastes a little bit of memory,
2155	 * but it is much faster.
2156	 */
2157	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2158					      sizeof(struct xfs_buf_log_item),
2159					      0, 0, NULL);
2160	if (!xfs_buf_item_cache)
2161		goto out_destroy_trans_cache;
2162
2163	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2164			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2165			0, 0, NULL);
2166	if (!xfs_efd_cache)
2167		goto out_destroy_buf_item_cache;
2168
2169	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2170			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2171			0, 0, NULL);
2172	if (!xfs_efi_cache)
2173		goto out_destroy_efd_cache;
2174
2175	xfs_inode_cache = kmem_cache_create("xfs_inode",
2176					   sizeof(struct xfs_inode), 0,
2177					   (SLAB_HWCACHE_ALIGN |
2178					    SLAB_RECLAIM_ACCOUNT |
2179					    SLAB_ACCOUNT),
2180					   xfs_fs_inode_init_once);
2181	if (!xfs_inode_cache)
2182		goto out_destroy_efi_cache;
2183
2184	xfs_ili_cache = kmem_cache_create("xfs_ili",
2185					 sizeof(struct xfs_inode_log_item), 0,
2186					 SLAB_RECLAIM_ACCOUNT,
2187					 NULL);
2188	if (!xfs_ili_cache)
2189		goto out_destroy_inode_cache;
2190
2191	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2192					     sizeof(struct xfs_icreate_item),
2193					     0, 0, NULL);
2194	if (!xfs_icreate_cache)
2195		goto out_destroy_ili_cache;
2196
2197	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2198					 sizeof(struct xfs_rud_log_item),
2199					 0, 0, NULL);
2200	if (!xfs_rud_cache)
2201		goto out_destroy_icreate_cache;
2202
2203	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2204			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2205			0, 0, NULL);
2206	if (!xfs_rui_cache)
2207		goto out_destroy_rud_cache;
2208
2209	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2210					 sizeof(struct xfs_cud_log_item),
2211					 0, 0, NULL);
2212	if (!xfs_cud_cache)
2213		goto out_destroy_rui_cache;
2214
2215	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2216			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2217			0, 0, NULL);
2218	if (!xfs_cui_cache)
2219		goto out_destroy_cud_cache;
2220
2221	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2222					 sizeof(struct xfs_bud_log_item),
2223					 0, 0, NULL);
2224	if (!xfs_bud_cache)
2225		goto out_destroy_cui_cache;
2226
2227	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2228			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2229			0, 0, NULL);
2230	if (!xfs_bui_cache)
2231		goto out_destroy_bud_cache;
2232
2233	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2234					    sizeof(struct xfs_attrd_log_item),
2235					    0, 0, NULL);
2236	if (!xfs_attrd_cache)
2237		goto out_destroy_bui_cache;
2238
2239	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2240					    sizeof(struct xfs_attri_log_item),
2241					    0, 0, NULL);
2242	if (!xfs_attri_cache)
2243		goto out_destroy_attrd_cache;
2244
2245	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2246					     sizeof(struct xfs_iunlink_item),
2247					     0, 0, NULL);
2248	if (!xfs_iunlink_cache)
2249		goto out_destroy_attri_cache;
2250
2251	xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
2252					 sizeof(struct xfs_xmd_log_item),
2253					 0, 0, NULL);
2254	if (!xfs_xmd_cache)
2255		goto out_destroy_iul_cache;
2256
2257	xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
2258					 sizeof(struct xfs_xmi_log_item),
2259					 0, 0, NULL);
2260	if (!xfs_xmi_cache)
2261		goto out_destroy_xmd_cache;
2262
2263	xfs_parent_args_cache = kmem_cache_create("xfs_parent_args",
2264					     sizeof(struct xfs_parent_args),
2265					     0, 0, NULL);
2266	if (!xfs_parent_args_cache)
2267		goto out_destroy_xmi_cache;
2268
2269	return 0;
2270
2271 out_destroy_xmi_cache:
2272	kmem_cache_destroy(xfs_xmi_cache);
2273 out_destroy_xmd_cache:
2274	kmem_cache_destroy(xfs_xmd_cache);
2275 out_destroy_iul_cache:
2276	kmem_cache_destroy(xfs_iunlink_cache);
2277 out_destroy_attri_cache:
2278	kmem_cache_destroy(xfs_attri_cache);
2279 out_destroy_attrd_cache:
2280	kmem_cache_destroy(xfs_attrd_cache);
2281 out_destroy_bui_cache:
2282	kmem_cache_destroy(xfs_bui_cache);
2283 out_destroy_bud_cache:
2284	kmem_cache_destroy(xfs_bud_cache);
2285 out_destroy_cui_cache:
2286	kmem_cache_destroy(xfs_cui_cache);
2287 out_destroy_cud_cache:
2288	kmem_cache_destroy(xfs_cud_cache);
2289 out_destroy_rui_cache:
2290	kmem_cache_destroy(xfs_rui_cache);
2291 out_destroy_rud_cache:
2292	kmem_cache_destroy(xfs_rud_cache);
2293 out_destroy_icreate_cache:
2294	kmem_cache_destroy(xfs_icreate_cache);
2295 out_destroy_ili_cache:
2296	kmem_cache_destroy(xfs_ili_cache);
2297 out_destroy_inode_cache:
2298	kmem_cache_destroy(xfs_inode_cache);
2299 out_destroy_efi_cache:
2300	kmem_cache_destroy(xfs_efi_cache);
2301 out_destroy_efd_cache:
2302	kmem_cache_destroy(xfs_efd_cache);
2303 out_destroy_buf_item_cache:
2304	kmem_cache_destroy(xfs_buf_item_cache);
2305 out_destroy_trans_cache:
2306	kmem_cache_destroy(xfs_trans_cache);
2307 out_destroy_ifork_cache:
2308	kmem_cache_destroy(xfs_ifork_cache);
2309 out_destroy_da_state_cache:
2310	kmem_cache_destroy(xfs_da_state_cache);
2311 out_destroy_defer_item_cache:
2312	xfs_defer_destroy_item_caches();
2313 out_destroy_rcbagbt_cur_cache:
2314	rcbagbt_destroy_cur_cache();
2315 out_destroy_btree_cur_cache:
2316	xfs_btree_destroy_cur_caches();
2317 out_destroy_log_ticket_cache:
2318	kmem_cache_destroy(xfs_log_ticket_cache);
2319 out_destroy_buf_cache:
2320	kmem_cache_destroy(xfs_buf_cache);
2321 out:
2322	return -ENOMEM;
2323}
2324
2325STATIC void
2326xfs_destroy_caches(void)
2327{
2328	/*
2329	 * Make sure all delayed RCU frees are flushed before we
2330	 * destroy caches.
2331	 */
2332	rcu_barrier();
2333	kmem_cache_destroy(xfs_parent_args_cache);
2334	kmem_cache_destroy(xfs_xmd_cache);
2335	kmem_cache_destroy(xfs_xmi_cache);
2336	kmem_cache_destroy(xfs_iunlink_cache);
2337	kmem_cache_destroy(xfs_attri_cache);
2338	kmem_cache_destroy(xfs_attrd_cache);
2339	kmem_cache_destroy(xfs_bui_cache);
2340	kmem_cache_destroy(xfs_bud_cache);
2341	kmem_cache_destroy(xfs_cui_cache);
2342	kmem_cache_destroy(xfs_cud_cache);
2343	kmem_cache_destroy(xfs_rui_cache);
2344	kmem_cache_destroy(xfs_rud_cache);
2345	kmem_cache_destroy(xfs_icreate_cache);
2346	kmem_cache_destroy(xfs_ili_cache);
2347	kmem_cache_destroy(xfs_inode_cache);
2348	kmem_cache_destroy(xfs_efi_cache);
2349	kmem_cache_destroy(xfs_efd_cache);
2350	kmem_cache_destroy(xfs_buf_item_cache);
2351	kmem_cache_destroy(xfs_trans_cache);
2352	kmem_cache_destroy(xfs_ifork_cache);
2353	kmem_cache_destroy(xfs_da_state_cache);
2354	xfs_defer_destroy_item_caches();
2355	rcbagbt_destroy_cur_cache();
2356	xfs_btree_destroy_cur_caches();
2357	kmem_cache_destroy(xfs_log_ticket_cache);
2358	kmem_cache_destroy(xfs_buf_cache);
2359}
2360
2361STATIC int __init
2362xfs_init_workqueues(void)
2363{
2364	/*
2365	 * The allocation workqueue can be used in memory reclaim situations
2366	 * (writepage path), and parallelism is only limited by the number of
2367	 * AGs in all the filesystems mounted. Hence use the default large
2368	 * max_active value for this workqueue.
2369	 */
2370	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2371			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2372	if (!xfs_alloc_wq)
2373		return -ENOMEM;
2374
2375	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2376			0);
2377	if (!xfs_discard_wq)
2378		goto out_free_alloc_wq;
2379
2380	return 0;
2381out_free_alloc_wq:
2382	destroy_workqueue(xfs_alloc_wq);
2383	return -ENOMEM;
2384}
2385
2386STATIC void
2387xfs_destroy_workqueues(void)
2388{
2389	destroy_workqueue(xfs_discard_wq);
2390	destroy_workqueue(xfs_alloc_wq);
2391}
2392
2393STATIC int __init
2394init_xfs_fs(void)
2395{
2396	int			error;
2397
2398	xfs_check_ondisk_structs();
2399
2400	error = xfs_dahash_test();
2401	if (error)
2402		return error;
2403
2404	printk(KERN_INFO XFS_VERSION_STRING " with "
2405			 XFS_BUILD_OPTIONS " enabled\n");
2406
2407	xfs_dir_startup();
2408
2409	error = xfs_init_caches();
2410	if (error)
2411		goto out;
2412
2413	error = xfs_init_workqueues();
2414	if (error)
2415		goto out_destroy_caches;
2416
2417	error = xfs_mru_cache_init();
2418	if (error)
2419		goto out_destroy_wq;
2420
2421	error = xfs_init_procfs();
2422	if (error)
2423		goto out_mru_cache_uninit;
2424
2425	error = xfs_sysctl_register();
2426	if (error)
2427		goto out_cleanup_procfs;
2428
2429	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2430
2431	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2432	if (!xfs_kset) {
2433		error = -ENOMEM;
2434		goto out_debugfs_unregister;
2435	}
2436
2437	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2438
2439	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2440	if (!xfsstats.xs_stats) {
2441		error = -ENOMEM;
2442		goto out_kset_unregister;
2443	}
2444
2445	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2446			       "stats");
2447	if (error)
2448		goto out_free_stats;
2449
2450	error = xchk_global_stats_setup(xfs_debugfs);
2451	if (error)
2452		goto out_remove_stats_kobj;
2453
2454#ifdef DEBUG
2455	xfs_dbg_kobj.kobject.kset = xfs_kset;
2456	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2457	if (error)
2458		goto out_remove_scrub_stats;
2459#endif
2460
2461	error = xfs_qm_init();
2462	if (error)
2463		goto out_remove_dbg_kobj;
2464
2465	error = register_filesystem(&xfs_fs_type);
2466	if (error)
2467		goto out_qm_exit;
2468	return 0;
2469
2470 out_qm_exit:
2471	xfs_qm_exit();
2472 out_remove_dbg_kobj:
2473#ifdef DEBUG
2474	xfs_sysfs_del(&xfs_dbg_kobj);
2475 out_remove_scrub_stats:
2476#endif
2477	xchk_global_stats_teardown();
2478 out_remove_stats_kobj:
2479	xfs_sysfs_del(&xfsstats.xs_kobj);
2480 out_free_stats:
2481	free_percpu(xfsstats.xs_stats);
2482 out_kset_unregister:
2483	kset_unregister(xfs_kset);
2484 out_debugfs_unregister:
2485	debugfs_remove(xfs_debugfs);
2486	xfs_sysctl_unregister();
2487 out_cleanup_procfs:
2488	xfs_cleanup_procfs();
2489 out_mru_cache_uninit:
2490	xfs_mru_cache_uninit();
2491 out_destroy_wq:
2492	xfs_destroy_workqueues();
2493 out_destroy_caches:
2494	xfs_destroy_caches();
2495 out:
2496	return error;
2497}
2498
2499STATIC void __exit
2500exit_xfs_fs(void)
2501{
2502	xfs_qm_exit();
2503	unregister_filesystem(&xfs_fs_type);
2504#ifdef DEBUG
2505	xfs_sysfs_del(&xfs_dbg_kobj);
2506#endif
2507	xchk_global_stats_teardown();
2508	xfs_sysfs_del(&xfsstats.xs_kobj);
2509	free_percpu(xfsstats.xs_stats);
2510	kset_unregister(xfs_kset);
2511	debugfs_remove(xfs_debugfs);
2512	xfs_sysctl_unregister();
2513	xfs_cleanup_procfs();
2514	xfs_mru_cache_uninit();
2515	xfs_destroy_workqueues();
2516	xfs_destroy_caches();
2517	xfs_uuid_table_free();
2518}
2519
2520module_init(init_xfs_fs);
2521module_exit(exit_xfs_fs);
2522
2523MODULE_AUTHOR("Silicon Graphics, Inc.");
2524MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2525MODULE_LICENSE("GPL");