v3.1
 
   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18
  19#include "xfs.h"
  20#include "xfs_bit.h"
  21#include "xfs_log.h"
  22#include "xfs_inum.h"
  23#include "xfs_trans.h"
  24#include "xfs_sb.h"
  25#include "xfs_ag.h"
  26#include "xfs_dir2.h"
  27#include "xfs_alloc.h"
  28#include "xfs_quota.h"
  29#include "xfs_mount.h"
  30#include "xfs_bmap_btree.h"
  31#include "xfs_alloc_btree.h"
  32#include "xfs_ialloc_btree.h"
  33#include "xfs_dinode.h"
  34#include "xfs_inode.h"
  35#include "xfs_btree.h"
  36#include "xfs_ialloc.h"
  37#include "xfs_bmap.h"
  38#include "xfs_rtalloc.h"
  39#include "xfs_error.h"
  40#include "xfs_itable.h"
  41#include "xfs_fsops.h"
  42#include "xfs_attr.h"
  43#include "xfs_buf_item.h"
  44#include "xfs_utils.h"
  45#include "xfs_vnodeops.h"
  46#include "xfs_log_priv.h"
  47#include "xfs_trans_priv.h"
  48#include "xfs_filestream.h"
  49#include "xfs_da_btree.h"
  50#include "xfs_extfree_item.h"
  51#include "xfs_mru_cache.h"
  52#include "xfs_inode_item.h"
  53#include "xfs_sync.h"
  54#include "xfs_trace.h"
  55
  56#include <linux/namei.h>
  57#include <linux/init.h>
  58#include <linux/slab.h>
  59#include <linux/mount.h>
  60#include <linux/mempool.h>
  61#include <linux/writeback.h>
  62#include <linux/kthread.h>
  63#include <linux/freezer.h>
  64#include <linux/parser.h>
  65
  66static const struct super_operations xfs_super_operations;
  67static kmem_zone_t *xfs_ioend_zone;
  68mempool_t *xfs_ioend_pool;
  69
  70#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
  71#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
  72#define MNTOPT_LOGDEV	"logdev"	/* log device */
  73#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
  74#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
  75#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
  76#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
  77#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
  78#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
  79#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
  80#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
  81#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
  82#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
  83#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
  84#define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
  85#define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
  86#define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
  87#define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
  88#define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
  89					 * unwritten extent conversion */
  90#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
  91#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
  92#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
  93#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
  94#define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
  95#define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
  96					 * in stat(). */
  97#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
  98#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
  99#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
 100#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
 101#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
 102#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
 103#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
 104#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
 105#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
 106#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
 107#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
  108#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota, limits not enforced */
  109#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota, limits not enforced */
  110#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota, limits not enforced */
 111#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
 112#define MNTOPT_DELAYLOG    "delaylog"	/* Delayed logging enabled */
 113#define MNTOPT_NODELAYLOG  "nodelaylog"	/* Delayed logging disabled */
 114#define MNTOPT_DISCARD	   "discard"	/* Discard unused blocks */
 115#define MNTOPT_NODISCARD   "nodiscard"	/* Do not discard unused blocks */
 116
 117/*
 118 * Table driven mount option parser.
 119 *
 120 * Currently only used for remount, but it will be used for mount
 121 * in the future, too.
 122 */
 123enum {
 124	Opt_barrier, Opt_nobarrier, Opt_err
 125};
 126
 127static const match_table_t tokens = {
 128	{Opt_barrier, "barrier"},
 129	{Opt_nobarrier, "nobarrier"},
 130	{Opt_err, NULL}
 131};
 132
 133
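/*
 * Helper for the option parsing below: parse a numeric value with an
 * optional K/M/G suffix and return it shifted accordingly, e.g.
 * "logbsize=32k" is parsed as 32 << 10 = 32768.
 */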
 134STATIC unsigned long
 135suffix_strtoul(char *s, char **endp, unsigned int base)
 136{
 137	int	last, shift_left_factor = 0;
 138	char	*value = s;
 139
 140	last = strlen(value) - 1;
 141	if (value[last] == 'K' || value[last] == 'k') {
 142		shift_left_factor = 10;
 143		value[last] = '\0';
 144	}
 145	if (value[last] == 'M' || value[last] == 'm') {
 146		shift_left_factor = 20;
 147		value[last] = '\0';
 148	}
 149	if (value[last] == 'G' || value[last] == 'g') {
 150		shift_left_factor = 30;
 151		value[last] = '\0';
 152	}
 153
 154	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
 155}
 156
 157/*
 158 * This function fills in xfs_mount_t fields based on mount args.
 159 * Note: the superblock has _not_ yet been read in.
 160 *
 161 * Note that this function leaks the various device name allocations on
 162 * failure.  The caller takes care of them.
 163 */
 164STATIC int
 165xfs_parseargs(
 166	struct xfs_mount	*mp,
 167	char			*options)
 168{
 169	struct super_block	*sb = mp->m_super;
 170	char			*this_char, *value, *eov;
 171	int			dsunit = 0;
 172	int			dswidth = 0;
 173	int			iosize = 0;
 174	__uint8_t		iosizelog = 0;
 175
 176	/*
 177	 * set up the mount name first so all the errors will refer to the
 178	 * correct device.
 179	 */
 180	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
 181	if (!mp->m_fsname)
 182		return ENOMEM;
 183	mp->m_fsname_len = strlen(mp->m_fsname) + 1;
 184
 185	/*
 186	 * Copy binary VFS mount flags we are interested in.
 187	 */
 188	if (sb->s_flags & MS_RDONLY)
 189		mp->m_flags |= XFS_MOUNT_RDONLY;
 190	if (sb->s_flags & MS_DIRSYNC)
 191		mp->m_flags |= XFS_MOUNT_DIRSYNC;
 192	if (sb->s_flags & MS_SYNCHRONOUS)
 193		mp->m_flags |= XFS_MOUNT_WSYNC;
 194
 195	/*
 196	 * Set some default flags that could be cleared by the mount option
 197	 * parsing.
 198	 */
 199	mp->m_flags |= XFS_MOUNT_BARRIER;
 200	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
 201	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
 202	mp->m_flags |= XFS_MOUNT_DELAYLOG;
 203
 204	/*
 205	 * These can be overridden by the mount option parsing.
 206	 */
 207	mp->m_logbufs = -1;
 208	mp->m_logbsize = -1;
 209
 210	if (!options)
 211		goto done;
 212
 213	while ((this_char = strsep(&options, ",")) != NULL) {
 214		if (!*this_char)
 215			continue;
 216		if ((value = strchr(this_char, '=')) != NULL)
 217			*value++ = 0;
 218
 219		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
 220			if (!value || !*value) {
 221				xfs_warn(mp, "%s option requires an argument",
 222					this_char);
 223				return EINVAL;
 224			}
 225			mp->m_logbufs = simple_strtoul(value, &eov, 10);
 226		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
 227			if (!value || !*value) {
 228				xfs_warn(mp, "%s option requires an argument",
 229					this_char);
 230				return EINVAL;
 231			}
 232			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
 233		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
 234			if (!value || !*value) {
 235				xfs_warn(mp, "%s option requires an argument",
 236					this_char);
 237				return EINVAL;
 238			}
 239			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
 240			if (!mp->m_logname)
 241				return ENOMEM;
 242		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
 243			xfs_warn(mp, "%s option not allowed on this system",
 244				this_char);
 245			return EINVAL;
 246		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
 247			if (!value || !*value) {
 248				xfs_warn(mp, "%s option requires an argument",
 249					this_char);
 250				return EINVAL;
 251			}
 252			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
 253			if (!mp->m_rtname)
 254				return ENOMEM;
 255		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
 256			if (!value || !*value) {
 257				xfs_warn(mp, "%s option requires an argument",
 258					this_char);
 259				return EINVAL;
 260			}
 261			iosize = simple_strtoul(value, &eov, 10);
 262			iosizelog = ffs(iosize) - 1;
 263		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
 264			if (!value || !*value) {
 265				xfs_warn(mp, "%s option requires an argument",
 266					this_char);
 267				return EINVAL;
 268			}
 269			iosize = suffix_strtoul(value, &eov, 10);
 270			iosizelog = ffs(iosize) - 1;
 271		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
 272			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
 273			mp->m_flags |= XFS_MOUNT_GRPID;
 274		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
 275			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
 276			mp->m_flags &= ~XFS_MOUNT_GRPID;
 277		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
 278			mp->m_flags |= XFS_MOUNT_WSYNC;
 279		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
 280			mp->m_flags |= XFS_MOUNT_NORECOVERY;
 281		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
 282			mp->m_flags |= XFS_MOUNT_NOALIGN;
 283		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
 284			mp->m_flags |= XFS_MOUNT_SWALLOC;
 285		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
 286			if (!value || !*value) {
 287				xfs_warn(mp, "%s option requires an argument",
 288					this_char);
 289				return EINVAL;
 290			}
 291			dsunit = simple_strtoul(value, &eov, 10);
 292		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
 293			if (!value || !*value) {
 294				xfs_warn(mp, "%s option requires an argument",
 295					this_char);
 296				return EINVAL;
 297			}
 298			dswidth = simple_strtoul(value, &eov, 10);
 299		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
 300			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
 301#if !XFS_BIG_INUMS
 302			xfs_warn(mp, "%s option not allowed on this system",
 303				this_char);
 304			return EINVAL;
 305#endif
 306		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
 307			mp->m_flags |= XFS_MOUNT_NOUUID;
 308		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
 309			mp->m_flags |= XFS_MOUNT_BARRIER;
 310		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
 311			mp->m_flags &= ~XFS_MOUNT_BARRIER;
 312		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
 313			mp->m_flags |= XFS_MOUNT_IKEEP;
 314		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
 315			mp->m_flags &= ~XFS_MOUNT_IKEEP;
 316		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
 317			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
 318		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
 319			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
 320		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
 321			mp->m_flags |= XFS_MOUNT_ATTR2;
 322		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
 323			mp->m_flags &= ~XFS_MOUNT_ATTR2;
 324			mp->m_flags |= XFS_MOUNT_NOATTR2;
 325		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
 326			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
 327		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
 328			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
 329					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
 330					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
 331					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
 332		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
 333			   !strcmp(this_char, MNTOPT_UQUOTA) ||
 334			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
 335			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
 336					 XFS_UQUOTA_ENFD);
 337		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
 338			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
 339			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
 340			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
 341		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
 342			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
 343			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
 344					 XFS_OQUOTA_ENFD);
 345		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
 346			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
 347			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
 348		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
 349			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
 350			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
 351					 XFS_OQUOTA_ENFD);
 352		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
 353			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
 354			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
 355		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
 356			mp->m_flags |= XFS_MOUNT_DELAYLOG;
 357		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
 358			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
 359			xfs_warn(mp,
 360	"nodelaylog is deprecated and will be removed in Linux 3.3");
 361		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
 362			mp->m_flags |= XFS_MOUNT_DISCARD;
 363		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
 364			mp->m_flags &= ~XFS_MOUNT_DISCARD;
 365		} else if (!strcmp(this_char, "ihashsize")) {
 366			xfs_warn(mp,
 367	"ihashsize no longer used, option is deprecated.");
 368		} else if (!strcmp(this_char, "osyncisdsync")) {
 369			xfs_warn(mp,
 370	"osyncisdsync has no effect, option is deprecated.");
 371		} else if (!strcmp(this_char, "osyncisosync")) {
 372			xfs_warn(mp,
 373	"osyncisosync has no effect, option is deprecated.");
 374		} else if (!strcmp(this_char, "irixsgid")) {
 375			xfs_warn(mp,
 376	"irixsgid is now a sysctl(2) variable, option is deprecated.");
 377		} else {
 378			xfs_warn(mp, "unknown mount option [%s].", this_char);
 379			return EINVAL;
 380		}
 381	}
 382
 383	/*
 384	 * no recovery flag requires a read-only mount
 385	 */
 386	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
 387	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
 388		xfs_warn(mp, "no-recovery mounts must be read-only.");
 389		return EINVAL;
 390	}
 391
 392	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
 393		xfs_warn(mp,
 394	"sunit and swidth options incompatible with the noalign option");
 395		return EINVAL;
 396	}
 397
 398	if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
 399	    !(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
 400		xfs_warn(mp,
 401	"the discard option is incompatible with the nodelaylog option");
 402		return EINVAL;
 403	}
 404
 405#ifndef CONFIG_XFS_QUOTA
 406	if (XFS_IS_QUOTA_RUNNING(mp)) {
 407		xfs_warn(mp, "quota support not available in this kernel.");
 408		return EINVAL;
 409	}
 410#endif
 411
 412	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
 413	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
 414		xfs_warn(mp, "cannot mount with both project and group quota");
 415		return EINVAL;
 416	}
 417
 418	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
 419		xfs_warn(mp, "sunit and swidth must be specified together");
 420		return EINVAL;
 421	}
 422
 423	if (dsunit && (dswidth % dsunit != 0)) {
 424		xfs_warn(mp,
 425	"stripe width (%d) must be a multiple of the stripe unit (%d)",
 426			dswidth, dsunit);
 427		return EINVAL;
 428	}
 429
 430done:
 431	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
 432		/*
 433		 * At this point the superblock has not been read
 434		 * in, therefore we do not know the block size.
 435		 * Before the mount call ends we will convert
 436		 * these to FSBs.
 437		 */
 438		if (dsunit) {
 439			mp->m_dalign = dsunit;
 440			mp->m_flags |= XFS_MOUNT_RETERR;
 441		}
 442
 443		if (dswidth)
 444			mp->m_swidth = dswidth;
 445	}
 446
 447	if (mp->m_logbufs != -1 &&
 448	    mp->m_logbufs != 0 &&
 449	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
 450	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
 451		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
 452			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
 453		return XFS_ERROR(EINVAL);
 454	}
 455	if (mp->m_logbsize != -1 &&
 456	    mp->m_logbsize !=  0 &&
 457	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
 458	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
 459	     !is_power_of_2(mp->m_logbsize))) {
 460		xfs_warn(mp,
 461			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
 462			mp->m_logbsize);
 463		return XFS_ERROR(EINVAL);
 464	}
 465
 466	if (iosizelog) {
 467		if (iosizelog > XFS_MAX_IO_LOG ||
 468		    iosizelog < XFS_MIN_IO_LOG) {
 469			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
 470				iosizelog, XFS_MIN_IO_LOG,
 471				XFS_MAX_IO_LOG);
 472			return XFS_ERROR(EINVAL);
 473		}
 474
 475		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
 476		mp->m_readio_log = iosizelog;
 477		mp->m_writeio_log = iosizelog;
 478	}
 479
 480	return 0;
 481}
 482
 483struct proc_xfs_info {
 484	int	flag;
 485	char	*str;
 486};
 487
 488STATIC int
 489xfs_showargs(
 490	struct xfs_mount	*mp,
 491	struct seq_file		*m)
 492{
 493	static struct proc_xfs_info xfs_info_set[] = {
 494		/* the few simple ones we can get from the mount struct */
 495		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
 496		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
 497		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
 498		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
 499		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
 500		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
 501		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
 502		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
 503		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
 504		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
 505		{ XFS_MOUNT_DISCARD,		"," MNTOPT_DISCARD },
 506		{ 0, NULL }
 507	};
 508	static struct proc_xfs_info xfs_info_unset[] = {
 509		/* the few simple ones we can get from the mount struct */
 510		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
 511		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
 512		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
 513		{ 0, NULL }
 514	};
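	/*
	 * Every string in the two tables above carries its own leading
	 * comma, so the loops below simply append the matching entries:
	 * the first table emits e.g. ",wsync" when XFS_MOUNT_WSYNC is
	 * set, the second emits e.g. ",nobarrier" when XFS_MOUNT_BARRIER
	 * is clear.
	 */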
 515	struct proc_xfs_info	*xfs_infop;
 516
 517	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 518		if (mp->m_flags & xfs_infop->flag)
 519			seq_puts(m, xfs_infop->str);
 520	}
 521	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
 522		if (!(mp->m_flags & xfs_infop->flag))
 523			seq_puts(m, xfs_infop->str);
 524	}
 525
 526	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
 527		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
 528				(int)(1 << mp->m_writeio_log) >> 10);
 529
 530	if (mp->m_logbufs > 0)
 531		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
 532	if (mp->m_logbsize > 0)
 533		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
 534
 535	if (mp->m_logname)
 536		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
 537	if (mp->m_rtname)
 538		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
 539
 540	if (mp->m_dalign > 0)
 541		seq_printf(m, "," MNTOPT_SUNIT "=%d",
 542				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 543	if (mp->m_swidth > 0)
 544		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
 545				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 546
 547	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
 548		seq_puts(m, "," MNTOPT_USRQUOTA);
 549	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
 550		seq_puts(m, "," MNTOPT_UQUOTANOENF);
 551
 552	/* Either project or group quotas can be active, not both */
 553
 554	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
 555		if (mp->m_qflags & XFS_OQUOTA_ENFD)
 556			seq_puts(m, "," MNTOPT_PRJQUOTA);
 557		else
 558			seq_puts(m, "," MNTOPT_PQUOTANOENF);
 559	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
 560		if (mp->m_qflags & XFS_OQUOTA_ENFD)
 561			seq_puts(m, "," MNTOPT_GRPQUOTA);
 562		else
 563			seq_puts(m, "," MNTOPT_GQUOTANOENF);
 564	}
 565
 566	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 567		seq_puts(m, "," MNTOPT_NOQUOTA);
 568
 569	return 0;
 570}
 571__uint64_t
 572xfs_max_file_offset(
 573	unsigned int		blockshift)
 574{
 575	unsigned int		pagefactor = 1;
 576	unsigned int		bitshift = BITS_PER_LONG - 1;
 577
 578	/* Figure out maximum filesize, on Linux this can depend on
 579	 * the filesystem blocksize (on 32 bit platforms).
 580	 * __block_write_begin does this in an [unsigned] long...
 581	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
 582	 * So, for page sized blocks (4K on 32 bit platforms),
 583	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
 584	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
 585	 * but for smaller blocksizes it is less (bbits = log2 bsize).
 586	 * Note1: get_block_t takes a long (implicit cast from above)
 587	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
 588	 * can optionally convert the [unsigned] long from above into
 589	 * an [unsigned] long long.
 590	 */
 591
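	/*
	 * Worked example for the 32-bit case without CONFIG_LBDAF,
	 * assuming 4k pages and 4k filesystem blocks (blockshift = 12,
	 * PAGE_CACHE_SHIFT = 12): pagefactor = 4096 and bitshift = 31,
	 * so the result is (4096 << 31) - 1 = 2^43 - 1, i.e. just under
	 * the 8Tb limit described above.
	 */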
 592#if BITS_PER_LONG == 32
 593# if defined(CONFIG_LBDAF)
 594	ASSERT(sizeof(sector_t) == 8);
 595	pagefactor = PAGE_CACHE_SIZE;
 596	bitshift = BITS_PER_LONG;
 597# else
 598	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
 599# endif
 600#endif
 601
 602	return (((__uint64_t)pagefactor) << bitshift) - 1;
 603}
 604
 605STATIC int
 606xfs_blkdev_get(
 607	xfs_mount_t		*mp,
 608	const char		*name,
 609	struct block_device	**bdevp)
 610{
 611	int			error = 0;
 612
 613	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 614				    mp);
 615	if (IS_ERR(*bdevp)) {
 616		error = PTR_ERR(*bdevp);
 617		xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
 618	}
 619
 620	return -error;
 621}
 622
 623STATIC void
 624xfs_blkdev_put(
 625	struct block_device	*bdev)
 626{
 627	if (bdev)
 628		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 629}
 630
 631void
 632xfs_blkdev_issue_flush(
 633	xfs_buftarg_t		*buftarg)
 634{
 635	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
 636}
 637
 638STATIC void
 639xfs_close_devices(
 640	struct xfs_mount	*mp)
 641{
 642	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 643		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
 644		xfs_free_buftarg(mp, mp->m_logdev_targp);
 645		xfs_blkdev_put(logdev);
 646	}
 647	if (mp->m_rtdev_targp) {
 648		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
 649		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 650		xfs_blkdev_put(rtdev);
 651	}
 652	xfs_free_buftarg(mp, mp->m_ddev_targp);
 653}
 654
 655/*
 656 * The file system configurations are:
 657 *	(1) device (partition) with data and internal log
 658 *	(2) logical volume with data and log subvolumes.
 659 *	(3) logical volume with data, log, and realtime subvolumes.
 660 *
 661 * We only have to handle opening the log and realtime volumes here if
 662 * they are present.  The data subvolume has already been opened by
 663 * get_sb_bdev() and is stored in sb->s_bdev.
 664 */
 665STATIC int
 666xfs_open_devices(
 667	struct xfs_mount	*mp)
 668{
 669	struct block_device	*ddev = mp->m_super->s_bdev;
 670	struct block_device	*logdev = NULL, *rtdev = NULL;
 671	int			error;
 672
 673	/*
 674	 * Open real time and log devices - order is important.
 675	 */
 676	if (mp->m_logname) {
 677		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
 678		if (error)
 679			goto out;
 680	}
 681
 682	if (mp->m_rtname) {
 683		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
 684		if (error)
 685			goto out_close_logdev;
 686
 687		if (rtdev == ddev || rtdev == logdev) {
 688			xfs_warn(mp,
 689	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 690			error = EINVAL;
 691			goto out_close_rtdev;
 692		}
 693	}
 694
 695	/*
 696	 * Setup xfs_mount buffer target pointers
 697	 */
 698	error = ENOMEM;
 699	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
 700	if (!mp->m_ddev_targp)
 701		goto out_close_rtdev;
 702
 703	if (rtdev) {
 704		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
 705							mp->m_fsname);
 706		if (!mp->m_rtdev_targp)
 707			goto out_free_ddev_targ;
 708	}
 709
 710	if (logdev && logdev != ddev) {
 711		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
 712							mp->m_fsname);
 713		if (!mp->m_logdev_targp)
 714			goto out_free_rtdev_targ;
 715	} else {
 716		mp->m_logdev_targp = mp->m_ddev_targp;
 717	}
 718
 719	return 0;
 720
 721 out_free_rtdev_targ:
 722	if (mp->m_rtdev_targp)
 723		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 724 out_free_ddev_targ:
 725	xfs_free_buftarg(mp, mp->m_ddev_targp);
 726 out_close_rtdev:
 727	if (rtdev)
 728		xfs_blkdev_put(rtdev);
 729 out_close_logdev:
 730	if (logdev && logdev != ddev)
 731		xfs_blkdev_put(logdev);
 732 out:
 733	return error;
 734}
 735
 736/*
 737 * Setup xfs_mount buffer target pointers based on superblock
 738 */
 739STATIC int
 740xfs_setup_devices(
 741	struct xfs_mount	*mp)
 742{
 743	int			error;
 744
 745	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
 746				    mp->m_sb.sb_sectsize);
 747	if (error)
 748		return error;
 749
 750	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 751		unsigned int	log_sector_size = BBSIZE;
 752
 753		if (xfs_sb_version_hassector(&mp->m_sb))
 754			log_sector_size = mp->m_sb.sb_logsectsize;
 755		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 756					    mp->m_sb.sb_blocksize,
 757					    log_sector_size);
 758		if (error)
 759			return error;
 760	}
 761	if (mp->m_rtdev_targp) {
 762		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 763					    mp->m_sb.sb_blocksize,
 764					    mp->m_sb.sb_sectsize);
 765		if (error)
 766			return error;
 767	}
 768
 769	return 0;
 770}
 771
 772/* Catch misguided souls that try to use this interface on XFS */
 773STATIC struct inode *
 774xfs_fs_alloc_inode(
 775	struct super_block	*sb)
 776{
 777	BUG();
 778	return NULL;
 779}
 780
 781/*
 782 * Now that the generic code is guaranteed not to be accessing
 783 * the linux inode, we can reclaim the inode.
 784 */
 785STATIC void
 786xfs_fs_destroy_inode(
 787	struct inode		*inode)
 788{
 789	struct xfs_inode	*ip = XFS_I(inode);
 790
 791	trace_xfs_destroy_inode(ip);
 792
 793	XFS_STATS_INC(vn_reclaim);
 794
 795	/* bad inode, get out here ASAP */
 796	if (is_bad_inode(inode))
 797		goto out_reclaim;
 798
 799	xfs_ioend_wait(ip);
 800
 801	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
 802
 803	/*
 804	 * We should never get here with one of the reclaim flags already set.
 805	 */
 806	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
 807	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
 808
 809	/*
 810	 * We always use background reclaim here because even if the
 811	 * inode is clean, it still may be under IO and hence we have
 812	 * to take the flush lock. The background reclaim path handles
 813	 * this more efficiently than we can here, so simply let background
 814	 * reclaim tear down all inodes.
 815	 */
 816out_reclaim:
 817	xfs_inode_set_reclaim_tag(ip);
 818}
 819
 820/*
 821 * Slab object creation initialisation for the XFS inode.
 822 * This covers only the idempotent fields in the XFS inode;
 823 * all other fields need to be initialised on allocation
 824 * from the slab. This avoids the need to repeatedly initialise
  825 * fields in the xfs inode that are left in the initialised state
 826 * when freeing the inode.
 827 */
 828STATIC void
 829xfs_fs_inode_init_once(
 830	void			*inode)
 831{
 832	struct xfs_inode	*ip = inode;
 833
 834	memset(ip, 0, sizeof(struct xfs_inode));
 835
 836	/* vfs inode */
 837	inode_init_once(VFS_I(ip));
 838
 839	/* xfs inode */
 840	atomic_set(&ip->i_iocount, 0);
 841	atomic_set(&ip->i_pincount, 0);
 842	spin_lock_init(&ip->i_flags_lock);
 843	init_waitqueue_head(&ip->i_ipin_wait);
 844	/*
 845	 * Because we want to use a counting completion, complete
 846	 * the flush completion once to allow a single access to
 847	 * the flush completion without blocking.
 848	 */
 849	init_completion(&ip->i_flush);
 850	complete(&ip->i_flush);
 851
 852	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 853		     "xfsino", ip->i_ino);
 854}
 855
 856/*
 857 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 858 * we catch unlogged VFS level updates to the inode.
 859 *
 860 * We need the barrier() to maintain correct ordering between unlogged
 861 * updates and the transaction commit code that clears the i_update_core
 862 * field. This requires all updates to be completed before marking the
 863 * inode dirty.
 864 */
 865STATIC void
 866xfs_fs_dirty_inode(
 867	struct inode	*inode,
 868	int		flags)
 869{
 870	barrier();
 871	XFS_I(inode)->i_update_core = 1;
 872}
 873
 874STATIC int
 875xfs_log_inode(
 876	struct xfs_inode	*ip)
 877{
 878	struct xfs_mount	*mp = ip->i_mount;
 879	struct xfs_trans	*tp;
 880	int			error;
 881
 882	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
 883	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
 884	if (error) {
 885		xfs_trans_cancel(tp, 0);
 886		return error;
 887	}
 888
 889	xfs_ilock(ip, XFS_ILOCK_EXCL);
 890	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
 891	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 892	return xfs_trans_commit(tp, 0);
 893}
 894
 895STATIC int
 896xfs_fs_write_inode(
 897	struct inode		*inode,
 898	struct writeback_control *wbc)
 899{
 900	struct xfs_inode	*ip = XFS_I(inode);
 901	struct xfs_mount	*mp = ip->i_mount;
 902	int			error = EAGAIN;
 903
 904	trace_xfs_write_inode(ip);
 905
 906	if (XFS_FORCED_SHUTDOWN(mp))
 907		return -XFS_ERROR(EIO);
 908	if (!ip->i_update_core)
 909		return 0;
 910
 911	if (wbc->sync_mode == WB_SYNC_ALL) {
 912		/*
  913		 * Make sure the inode has made it into the log.  Instead
 914		 * of forcing it all the way to stable storage using a
 915		 * synchronous transaction we let the log force inside the
  916		 * ->sync_fs call do that for us, which reduces the number
  917		 * of synchronous log forces dramatically.
 918		 */
 919		xfs_ioend_wait(ip);
 920		error = xfs_log_inode(ip);
 921		if (error)
 922			goto out;
 923		return 0;
 924	} else {
 925		/*
 926		 * We make this non-blocking if the inode is contended, return
 927		 * EAGAIN to indicate to the caller that they did not succeed.
 928		 * This prevents the flush path from blocking on inodes inside
 929		 * another operation right now, they get caught later by
 930		 * xfs_sync.
 931		 */
 932		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 933			goto out;
 934
 935		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
 936			goto out_unlock;
 937
 938		/*
 939		 * Now we have the flush lock and the inode is not pinned, we
 940		 * can check if the inode is really clean as we know that
 941		 * there are no pending transaction completions, it is not
 942		 * waiting on the delayed write queue and there is no IO in
 943		 * progress.
 944		 */
 945		if (xfs_inode_clean(ip)) {
 946			xfs_ifunlock(ip);
 947			error = 0;
 948			goto out_unlock;
 949		}
 950		error = xfs_iflush(ip, SYNC_TRYLOCK);
 951	}
 952
 953 out_unlock:
 954	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 955 out:
 956	/*
 957	 * if we failed to write out the inode then mark
 958	 * it dirty again so we'll try again later.
 959	 */
 960	if (error)
 961		xfs_mark_inode_dirty_sync(ip);
 962	return -error;
 963}
 964
 965STATIC void
 966xfs_fs_evict_inode(
 967	struct inode		*inode)
 968{
 969	xfs_inode_t		*ip = XFS_I(inode);
 970
 971	trace_xfs_evict_inode(ip);
 972
 973	truncate_inode_pages(&inode->i_data, 0);
 974	end_writeback(inode);
 975	XFS_STATS_INC(vn_rele);
 976	XFS_STATS_INC(vn_remove);
 977	XFS_STATS_DEC(vn_active);
 978
 979	/*
 980	 * The iolock is used by the file system to coordinate reads,
 981	 * writes, and block truncates.  Up to this point the lock
 982	 * protected concurrent accesses by users of the inode.  But
 983	 * from here forward we're doing some final processing of the
 984	 * inode because we're done with it, and although we reuse the
 985	 * iolock for protection it is really a distinct lock class
 986	 * (in the lockdep sense) from before.  To keep lockdep happy
 987	 * (and basically indicate what we are doing), we explicitly
 988	 * re-init the iolock here.
 989	 */
 990	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
 991	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 992	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
 993			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
 994
 995	xfs_inactive(ip);
 996}
 997
 998STATIC void
 999xfs_free_fsname(
1000	struct xfs_mount	*mp)
1001{
1002	kfree(mp->m_fsname);
1003	kfree(mp->m_rtname);
1004	kfree(mp->m_logname);
1005}
1006
1007STATIC void
1008xfs_fs_put_super(
1009	struct super_block	*sb)
1010{
1011	struct xfs_mount	*mp = XFS_M(sb);
1012
1013	xfs_syncd_stop(mp);
1014
1015	/*
1016	 * Blow away any referenced inode in the filestreams cache.
1017	 * This can and will cause log traffic as inodes go inactive
1018	 * here.
1019	 */
1020	xfs_filestream_unmount(mp);
1021
1022	XFS_bflush(mp->m_ddev_targp);
1023
1024	xfs_unmountfs(mp);
1025	xfs_freesb(mp);
1026	xfs_icsb_destroy_counters(mp);
1027	xfs_close_devices(mp);
1028	xfs_free_fsname(mp);
1029	kfree(mp);
1030}
1031
1032STATIC int
1033xfs_fs_sync_fs(
1034	struct super_block	*sb,
1035	int			wait)
1036{
1037	struct xfs_mount	*mp = XFS_M(sb);
1038	int			error;
1039
1040	/*
1041	 * Not much we can do for the first async pass.  Writing out the
1042	 * superblock would be counter-productive as we are going to redirty it
1043	 * when writing out other data and metadata (and writing out a single
1044	 * block is quite fast anyway).
1045	 *
1046	 * Try to asynchronously kick off quota syncing at least.
1047	 */
1048	if (!wait) {
1049		xfs_qm_sync(mp, SYNC_TRYLOCK);
1050		return 0;
1051	}
1052
1053	error = xfs_quiesce_data(mp);
1054	if (error)
1055		return -error;
1056
1057	if (laptop_mode) {
1058		/*
1059		 * The disk must be active because we're syncing.
1060		 * We schedule xfssyncd now (now that the disk is
1061		 * active) instead of later (when it might not be).
1062		 */
1063		flush_delayed_work_sync(&mp->m_sync_work);
1064	}
1065
1066	return 0;
1067}
1068
1069STATIC int
1070xfs_fs_statfs(
1071	struct dentry		*dentry,
1072	struct kstatfs		*statp)
1073{
1074	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
1075	xfs_sb_t		*sbp = &mp->m_sb;
1076	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
1077	__uint64_t		fakeinos, id;
1078	xfs_extlen_t		lsize;
1079	__int64_t		ffree;
1080
1081	statp->f_type = XFS_SB_MAGIC;
1082	statp->f_namelen = MAXNAMELEN - 1;
1083
1084	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1085	statp->f_fsid.val[0] = (u32)id;
1086	statp->f_fsid.val[1] = (u32)(id >> 32);
1087
1088	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
1089
1090	spin_lock(&mp->m_sb_lock);
1091	statp->f_bsize = sbp->sb_blocksize;
1092	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1093	statp->f_blocks = sbp->sb_dblocks - lsize;
1094	statp->f_bfree = statp->f_bavail =
1095				sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
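	/*
	 * Inodes are allocated dynamically in XFS, so report the inodes
	 * that already exist plus those that could still be created from
	 * the free space: each free block can hold (1 << sb_inopblog)
	 * inodes, capped by XFS_MAXINUMBER and, if set, by m_maxicount.
	 */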
1096	fakeinos = statp->f_bfree << sbp->sb_inopblog;
1097	statp->f_files =
1098	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1099	if (mp->m_maxicount)
1100		statp->f_files = min_t(typeof(statp->f_files),
1101					statp->f_files,
1102					mp->m_maxicount);
1103
1104	/* make sure statp->f_ffree does not underflow */
1105	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1106	statp->f_ffree = max_t(__int64_t, ffree, 0);
1107
1108	spin_unlock(&mp->m_sb_lock);
1109
1110	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
1111	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
1112			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
1113		xfs_qm_statvfs(ip, statp);
1114	return 0;
1115}
1116
1117STATIC void
1118xfs_save_resvblks(struct xfs_mount *mp)
1119{
1120	__uint64_t resblks = 0;
1121
1122	mp->m_resblks_save = mp->m_resblks;
1123	xfs_reserve_blocks(mp, &resblks, NULL);
1124}
1125
1126STATIC void
1127xfs_restore_resvblks(struct xfs_mount *mp)
1128{
1129	__uint64_t resblks;
1130
1131	if (mp->m_resblks_save) {
1132		resblks = mp->m_resblks_save;
1133		mp->m_resblks_save = 0;
1134	} else
1135		resblks = xfs_default_resblks(mp);
1136
1137	xfs_reserve_blocks(mp, &resblks, NULL);
1138}
1139
1140STATIC int
1141xfs_fs_remount(
1142	struct super_block	*sb,
1143	int			*flags,
1144	char			*options)
1145{
1146	struct xfs_mount	*mp = XFS_M(sb);
1147	substring_t		args[MAX_OPT_ARGS];
1148	char			*p;
1149	int			error;
1150
1151	while ((p = strsep(&options, ",")) != NULL) {
1152		int token;
1153
1154		if (!*p)
1155			continue;
1156
1157		token = match_token(p, tokens, args);
1158		switch (token) {
1159		case Opt_barrier:
1160			mp->m_flags |= XFS_MOUNT_BARRIER;
1161			break;
1162		case Opt_nobarrier:
1163			mp->m_flags &= ~XFS_MOUNT_BARRIER;
1164			break;
1165		default:
1166			/*
1167			 * Logically we would return an error here to prevent
1168			 * users from believing they might have changed
1169			 * mount options using remount which can't be changed.
1170			 *
1171			 * But unfortunately mount(8) adds all options from
1172			 * mtab and fstab to the mount arguments in some cases
1173			 * so we can't blindly reject options, but have to
1174			 * check for each specified option if it actually
1175			 * differs from the currently set option and only
1176			 * reject it if that's the case.
1177			 *
1178			 * Until that is implemented we return success for
1179			 * every remount request, and silently ignore all
1180			 * options that we can't actually change.
1181			 */
1182#if 0
1183			xfs_info(mp,
1184		"mount option \"%s\" not supported for remount\n", p);
1185			return -EINVAL;
1186#else
1187			break;
1188#endif
1189		}
1190	}
1191
1192	/* ro -> rw */
1193	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1194		mp->m_flags &= ~XFS_MOUNT_RDONLY;
1195
1196		/*
1197		 * If this is the first remount to writeable state we
1198		 * might have some superblock changes to update.
1199		 */
1200		if (mp->m_update_flags) {
1201			error = xfs_mount_log_sb(mp, mp->m_update_flags);
1202			if (error) {
1203				xfs_warn(mp, "failed to write sb changes");
1204				return error;
1205			}
1206			mp->m_update_flags = 0;
1207		}
1208
1209		/*
1210		 * Fill out the reserve pool if it is empty. Use the stashed
1211		 * value if it is non-zero, otherwise go with the default.
1212		 */
1213		xfs_restore_resvblks(mp);
1214	}
1215
1216	/* rw -> ro */
1217	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
1218		/*
1219		 * After we have synced the data but before we sync the
1220		 * metadata, we need to free up the reserve block pool so that
1221		 * the used block count in the superblock on disk is correct at
1222		 * the end of the remount. Stash the current reserve pool size
1223		 * so that if we get remounted rw, we can return it to the same
1224		 * size.
1225		 */
1226
1227		xfs_quiesce_data(mp);
1228		xfs_save_resvblks(mp);
1229		xfs_quiesce_attr(mp);
1230		mp->m_flags |= XFS_MOUNT_RDONLY;
1231	}
1232
1233	return 0;
1234}
1235
1236/*
1237 * Second stage of a freeze. The data is already frozen so we only
1238 * need to take care of the metadata. Once that's done write a dummy
1239 * record to dirty the log in case of a crash while frozen.
1240 */
1241STATIC int
1242xfs_fs_freeze(
1243	struct super_block	*sb)
1244{
1245	struct xfs_mount	*mp = XFS_M(sb);
1246
1247	xfs_save_resvblks(mp);
1248	xfs_quiesce_attr(mp);
1249	return -xfs_fs_log_dummy(mp);
1250}
1251
1252STATIC int
1253xfs_fs_unfreeze(
1254	struct super_block	*sb)
1255{
1256	struct xfs_mount	*mp = XFS_M(sb);
1257
1258	xfs_restore_resvblks(mp);
1259	return 0;
1260}
1261
1262STATIC int
1263xfs_fs_show_options(
1264	struct seq_file		*m,
1265	struct vfsmount		*mnt)
1266{
1267	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
1268}
1269
1270/*
1271 * This function fills in xfs_mount_t fields based on mount args.
1272 * Note: the superblock _has_ now been read in.
1273 */
1274STATIC int
1275xfs_finish_flags(
1276	struct xfs_mount	*mp)
1277{
1278	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1279
1280	/* Fail a mount where the logbuf is smaller than the log stripe */
1281	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1282		if (mp->m_logbsize <= 0 &&
1283		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1284			mp->m_logbsize = mp->m_sb.sb_logsunit;
1285		} else if (mp->m_logbsize > 0 &&
1286			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1287			xfs_warn(mp,
1288		"logbuf size must be greater than or equal to log stripe size");
1289			return XFS_ERROR(EINVAL);
1290		}
1291	} else {
1292		/* Fail a mount if the logbuf is larger than 32K */
1293		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1294			xfs_warn(mp,
1295		"logbuf size for version 1 logs must be 16K or 32K");
1296			return XFS_ERROR(EINVAL);
1297		}
1298	}
1299
1300	/*
1301	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1302	 * told by noattr2 to turn it off
1303	 */
1304	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1305	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
1306		mp->m_flags |= XFS_MOUNT_ATTR2;
1307
1308	/*
1309	 * prohibit r/w mounts of read-only filesystems
1310	 */
1311	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1312		xfs_warn(mp,
1313			"cannot mount a read-only filesystem as read-write");
1314		return XFS_ERROR(EROFS);
1315	}
1316
1317	return 0;
1318}
1319
1320STATIC int
1321xfs_fs_fill_super(
1322	struct super_block	*sb,
1323	void			*data,
1324	int			silent)
1325{
1326	struct inode		*root;
1327	struct xfs_mount	*mp = NULL;
1328	int			flags = 0, error = ENOMEM;
1329
1330	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1331	if (!mp)
1332		goto out;
1333
1334	spin_lock_init(&mp->m_sb_lock);
1335	mutex_init(&mp->m_growlock);
1336	atomic_set(&mp->m_active_trans, 0);
1337
1338	mp->m_super = sb;
1339	sb->s_fs_info = mp;
1340
1341	error = xfs_parseargs(mp, (char *)data);
1342	if (error)
1343		goto out_free_fsname;
1344
1345	sb_min_blocksize(sb, BBSIZE);
1346	sb->s_xattr = xfs_xattr_handlers;
1347	sb->s_export_op = &xfs_export_operations;
1348#ifdef CONFIG_XFS_QUOTA
1349	sb->s_qcop = &xfs_quotactl_operations;
1350#endif
1351	sb->s_op = &xfs_super_operations;
1352
1353	if (silent)
1354		flags |= XFS_MFSI_QUIET;
1355
1356	error = xfs_open_devices(mp);
1357	if (error)
1358		goto out_free_fsname;
1359
1360	error = xfs_icsb_init_counters(mp);
1361	if (error)
1362		goto out_close_devices;
1363
1364	error = xfs_readsb(mp, flags);
1365	if (error)
1366		goto out_destroy_counters;
1367
1368	error = xfs_finish_flags(mp);
1369	if (error)
1370		goto out_free_sb;
1371
1372	error = xfs_setup_devices(mp);
1373	if (error)
1374		goto out_free_sb;
1375
1376	error = xfs_filestream_mount(mp);
1377	if (error)
1378		goto out_free_sb;
1379
1380	/*
1381	 * we must configure the block size in the superblock before we run the
1382	 * full mount process as the mount process can lookup and cache inodes.
1383	 * For the same reason we must also initialise the syncd and register
1384	 * the inode cache shrinker so that inodes can be reclaimed during
1385	 * operations like a quotacheck that iterate all inodes in the
1386	 * filesystem.
1387	 */
1388	sb->s_magic = XFS_SB_MAGIC;
1389	sb->s_blocksize = mp->m_sb.sb_blocksize;
1390	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1391	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1392	sb->s_time_gran = 1;
1393	set_posix_acl_flag(sb);
1394
1395	error = xfs_mountfs(mp);
1396	if (error)
1397		goto out_filestream_unmount;
1398
1399	error = xfs_syncd_init(mp);
1400	if (error)
1401		goto out_unmount;
1402
1403	root = igrab(VFS_I(mp->m_rootip));
1404	if (!root) {
1405		error = ENOENT;
1406		goto out_syncd_stop;
1407	}
1408	if (is_bad_inode(root)) {
1409		error = EINVAL;
1410		goto out_syncd_stop;
1411	}
1412	sb->s_root = d_alloc_root(root);
1413	if (!sb->s_root) {
1414		error = ENOMEM;
1415		goto out_iput;
1416	}
1417
1418	return 0;
1419
1420 out_filestream_unmount:
1421	xfs_filestream_unmount(mp);
1422 out_free_sb:
1423	xfs_freesb(mp);
1424 out_destroy_counters:
1425	xfs_icsb_destroy_counters(mp);
1426 out_close_devices:
1427	xfs_close_devices(mp);
1428 out_free_fsname:
1429	xfs_free_fsname(mp);
1430	kfree(mp);
1431 out:
1432	return -error;
1433
1434 out_iput:
1435	iput(root);
1436 out_syncd_stop:
1437	xfs_syncd_stop(mp);
1438 out_unmount:
1439	/*
1440	 * Blow away any referenced inode in the filestreams cache.
1441	 * This can and will cause log traffic as inodes go inactive
1442	 * here.
1443	 */
1444	xfs_filestream_unmount(mp);
1445
1446	XFS_bflush(mp->m_ddev_targp);
1447
1448	xfs_unmountfs(mp);
1449	goto out_free_sb;
1450}
1451
1452STATIC struct dentry *
1453xfs_fs_mount(
1454	struct file_system_type	*fs_type,
1455	int			flags,
1456	const char		*dev_name,
1457	void			*data)
1458{
1459	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1460}
1461
1462static int
1463xfs_fs_nr_cached_objects(
1464	struct super_block	*sb)
1465{
1466	return xfs_reclaim_inodes_count(XFS_M(sb));
1467}
1468
1469static void
1470xfs_fs_free_cached_objects(
1471	struct super_block	*sb,
1472	int			nr_to_scan)
1473{
1474	xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
1475}
1476
1477static const struct super_operations xfs_super_operations = {
1478	.alloc_inode		= xfs_fs_alloc_inode,
1479	.destroy_inode		= xfs_fs_destroy_inode,
1480	.dirty_inode		= xfs_fs_dirty_inode,
1481	.write_inode		= xfs_fs_write_inode,
1482	.evict_inode		= xfs_fs_evict_inode,
1483	.put_super		= xfs_fs_put_super,
1484	.sync_fs		= xfs_fs_sync_fs,
1485	.freeze_fs		= xfs_fs_freeze,
1486	.unfreeze_fs		= xfs_fs_unfreeze,
1487	.statfs			= xfs_fs_statfs,
1488	.remount_fs		= xfs_fs_remount,
1489	.show_options		= xfs_fs_show_options,
1490	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1491	.free_cached_objects	= xfs_fs_free_cached_objects,
1492};
1493
1494static struct file_system_type xfs_fs_type = {
1495	.owner			= THIS_MODULE,
1496	.name			= "xfs",
1497	.mount			= xfs_fs_mount,
1498	.kill_sb		= kill_block_super,
1499	.fs_flags		= FS_REQUIRES_DEV,
1500};
1501
1502STATIC int __init
1503xfs_init_zones(void)
1504{
1505
1506	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
1507	if (!xfs_ioend_zone)
1508		goto out;
1509
1510	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
1511						  xfs_ioend_zone);
1512	if (!xfs_ioend_pool)
1513		goto out_destroy_ioend_zone;
1514
1515	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1516						"xfs_log_ticket");
1517	if (!xfs_log_ticket_zone)
1518		goto out_destroy_ioend_pool;
1519
1520	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
1521						"xfs_bmap_free_item");
1522	if (!xfs_bmap_free_item_zone)
1523		goto out_destroy_log_ticket_zone;
1524
1525	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1526						"xfs_btree_cur");
1527	if (!xfs_btree_cur_zone)
1528		goto out_destroy_bmap_free_item_zone;
1529
1530	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1531						"xfs_da_state");
1532	if (!xfs_da_state_zone)
1533		goto out_destroy_btree_cur_zone;
1534
1535	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
1536	if (!xfs_dabuf_zone)
1537		goto out_destroy_da_state_zone;
1538
1539	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1540	if (!xfs_ifork_zone)
1541		goto out_destroy_dabuf_zone;
1542
1543	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1544	if (!xfs_trans_zone)
1545		goto out_destroy_ifork_zone;
1546
1547	xfs_log_item_desc_zone =
1548		kmem_zone_init(sizeof(struct xfs_log_item_desc),
1549			       "xfs_log_item_desc");
1550	if (!xfs_log_item_desc_zone)
1551		goto out_destroy_trans_zone;
1552
1553	/*
1554	 * The size of the zone allocated buf log item is the maximum
1555	 * size possible under XFS.  This wastes a little bit of memory,
1556	 * but it is much faster.
1557	 */
1558	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
1559				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
1560				  NBWORD) * sizeof(int))), "xfs_buf_item");
1561	if (!xfs_buf_item_zone)
1562		goto out_destroy_log_item_desc_zone;
1563
1564	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1565			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1566				 sizeof(xfs_extent_t))), "xfs_efd_item");
1567	if (!xfs_efd_zone)
1568		goto out_destroy_buf_item_zone;
1569
1570	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1571			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1572				sizeof(xfs_extent_t))), "xfs_efi_item");
1573	if (!xfs_efi_zone)
1574		goto out_destroy_efd_zone;
1575
1576	xfs_inode_zone =
1577		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1578			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
1579			xfs_fs_inode_init_once);
1580	if (!xfs_inode_zone)
1581		goto out_destroy_efi_zone;
1582
1583	xfs_ili_zone =
1584		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1585					KM_ZONE_SPREAD, NULL);
1586	if (!xfs_ili_zone)
1587		goto out_destroy_inode_zone;
1588
1589	return 0;
1590
1591 out_destroy_inode_zone:
1592	kmem_zone_destroy(xfs_inode_zone);
1593 out_destroy_efi_zone:
1594	kmem_zone_destroy(xfs_efi_zone);
1595 out_destroy_efd_zone:
1596	kmem_zone_destroy(xfs_efd_zone);
1597 out_destroy_buf_item_zone:
1598	kmem_zone_destroy(xfs_buf_item_zone);
1599 out_destroy_log_item_desc_zone:
1600	kmem_zone_destroy(xfs_log_item_desc_zone);
1601 out_destroy_trans_zone:
1602	kmem_zone_destroy(xfs_trans_zone);
1603 out_destroy_ifork_zone:
1604	kmem_zone_destroy(xfs_ifork_zone);
1605 out_destroy_dabuf_zone:
1606	kmem_zone_destroy(xfs_dabuf_zone);
1607 out_destroy_da_state_zone:
1608	kmem_zone_destroy(xfs_da_state_zone);
1609 out_destroy_btree_cur_zone:
1610	kmem_zone_destroy(xfs_btree_cur_zone);
1611 out_destroy_bmap_free_item_zone:
1612	kmem_zone_destroy(xfs_bmap_free_item_zone);
1613 out_destroy_log_ticket_zone:
1614	kmem_zone_destroy(xfs_log_ticket_zone);
1615 out_destroy_ioend_pool:
1616	mempool_destroy(xfs_ioend_pool);
1617 out_destroy_ioend_zone:
1618	kmem_zone_destroy(xfs_ioend_zone);
1619 out:
1620	return -ENOMEM;
1621}
1622
1623STATIC void
1624xfs_destroy_zones(void)
1625{
1626	kmem_zone_destroy(xfs_ili_zone);
1627	kmem_zone_destroy(xfs_inode_zone);
1628	kmem_zone_destroy(xfs_efi_zone);
1629	kmem_zone_destroy(xfs_efd_zone);
1630	kmem_zone_destroy(xfs_buf_item_zone);
1631	kmem_zone_destroy(xfs_log_item_desc_zone);
1632	kmem_zone_destroy(xfs_trans_zone);
1633	kmem_zone_destroy(xfs_ifork_zone);
1634	kmem_zone_destroy(xfs_dabuf_zone);
1635	kmem_zone_destroy(xfs_da_state_zone);
1636	kmem_zone_destroy(xfs_btree_cur_zone);
1637	kmem_zone_destroy(xfs_bmap_free_item_zone);
1638	kmem_zone_destroy(xfs_log_ticket_zone);
1639	mempool_destroy(xfs_ioend_pool);
1640	kmem_zone_destroy(xfs_ioend_zone);
1641
1642}
1643
1644STATIC int __init
1645xfs_init_workqueues(void)
1646{
1647	/*
1648	 * max_active is set to 8 to give enough concurrency to allow
1649	 * multiple work operations on each CPU to run. This allows multiple
1650	 * filesystems to be running sync work concurrently, and scales with
1651	 * the number of CPUs in the system.
1652	 */
1653	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
1654	if (!xfs_syncd_wq)
1655		return -ENOMEM;
1656	return 0;
1657}
1658
1659STATIC void
1660xfs_destroy_workqueues(void)
1661{
1662	destroy_workqueue(xfs_syncd_wq);
1663}
1664
1665STATIC int __init
1666init_xfs_fs(void)
1667{
1668	int			error;
1669
1670	printk(KERN_INFO XFS_VERSION_STRING " with "
1671			 XFS_BUILD_OPTIONS " enabled\n");
1672
1673	xfs_ioend_init();
1674	xfs_dir_startup();
1675
1676	error = xfs_init_zones();
1677	if (error)
1678		goto out;
1679
1680	error = xfs_init_workqueues();
1681	if (error)
1682		goto out_destroy_zones;
1683
1684	error = xfs_mru_cache_init();
1685	if (error)
1686		goto out_destroy_wq;
1687
1688	error = xfs_filestream_init();
1689	if (error)
1690		goto out_mru_cache_uninit;
1691
1692	error = xfs_buf_init();
1693	if (error)
1694		goto out_filestream_uninit;
1695
1696	error = xfs_init_procfs();
1697	if (error)
1698		goto out_buf_terminate;
1699
1700	error = xfs_sysctl_register();
1701	if (error)
1702		goto out_cleanup_procfs;
1703
1704	vfs_initquota();
1705
1706	error = register_filesystem(&xfs_fs_type);
1707	if (error)
1708		goto out_sysctl_unregister;
1709	return 0;
1710
1711 out_sysctl_unregister:
1712	xfs_sysctl_unregister();
1713 out_cleanup_procfs:
1714	xfs_cleanup_procfs();
1715 out_buf_terminate:
1716	xfs_buf_terminate();
1717 out_filestream_uninit:
1718	xfs_filestream_uninit();
1719 out_mru_cache_uninit:
1720	xfs_mru_cache_uninit();
1721 out_destroy_wq:
1722	xfs_destroy_workqueues();
1723 out_destroy_zones:
1724	xfs_destroy_zones();
1725 out:
1726	return error;
1727}
1728
1729STATIC void __exit
1730exit_xfs_fs(void)
1731{
1732	vfs_exitquota();
1733	unregister_filesystem(&xfs_fs_type);
1734	xfs_sysctl_unregister();
1735	xfs_cleanup_procfs();
1736	xfs_buf_terminate();
1737	xfs_filestream_uninit();
1738	xfs_mru_cache_uninit();
1739	xfs_destroy_workqueues();
1740	xfs_destroy_zones();
1741}
1742
1743module_init(init_xfs_fs);
1744module_exit(exit_xfs_fs);
1745
1746MODULE_AUTHOR("Silicon Graphics, Inc.");
1747MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
1748MODULE_LICENSE("GPL");
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_sb.h"
  13#include "xfs_mount.h"
  14#include "xfs_inode.h"
  15#include "xfs_btree.h"
  16#include "xfs_bmap.h"
  17#include "xfs_alloc.h"
  18#include "xfs_fsops.h"
  19#include "xfs_trans.h"
  20#include "xfs_buf_item.h"
  21#include "xfs_log.h"
  22#include "xfs_log_priv.h"
  23#include "xfs_dir2.h"
  24#include "xfs_extfree_item.h"
  25#include "xfs_mru_cache.h"
  26#include "xfs_inode_item.h"
  27#include "xfs_icache.h"
  28#include "xfs_trace.h"
  29#include "xfs_icreate_item.h"
  30#include "xfs_filestream.h"
  31#include "xfs_quota.h"
  32#include "xfs_sysfs.h"
  33#include "xfs_ondisk.h"
  34#include "xfs_rmap_item.h"
  35#include "xfs_refcount_item.h"
  36#include "xfs_bmap_item.h"
  37#include "xfs_reflink.h"
  38
  39#include <linux/magic.h>
  40#include <linux/fs_context.h>
  41#include <linux/fs_parser.h>
  42
  43static const struct super_operations xfs_super_operations;
  44
  45static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
  46#ifdef DEBUG
  47static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
  48#endif
  49
  50enum xfs_dax_mode {
  51	XFS_DAX_INODE = 0,
  52	XFS_DAX_ALWAYS = 1,
  53	XFS_DAX_NEVER = 2,
  54};
  55
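/*
 * Apply one of the three dax= policies to the mount flags.  "inode"
 * clears both override flags so the per-inode DAX attribute decides;
 * "always" and "never" force the respective behaviour globally.
 */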
  56static void
  57xfs_mount_set_dax_mode(
  58	struct xfs_mount	*mp,
  59	enum xfs_dax_mode	mode)
  60{
  61	switch (mode) {
  62	case XFS_DAX_INODE:
  63		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
  64		break;
  65	case XFS_DAX_ALWAYS:
  66		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
  67		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
  68		break;
  69	case XFS_DAX_NEVER:
  70		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
  71		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
  72		break;
  73	}
  74}
  75
  76static const struct constant_table dax_param_enums[] = {
  77	{"inode",	XFS_DAX_INODE },
  78	{"always",	XFS_DAX_ALWAYS },
  79	{"never",	XFS_DAX_NEVER },
  80	{}
  81};
  82
  83/*
  84 * Table driven mount option parser.
  85 */
  86enum {
  87	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
  88	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
  89	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
  90	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
  91	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
  92	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
  93	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
  94	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
  95	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
  96};
  97
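/*
 * Parameter specs for the fs_context mount API; fs_parse() maps each
 * key to one of the Opt_* tokens above.  An illustrative (not
 * exhaustive) invocation exercising several of them:
 *
 *	mount -t xfs -o logbufs=8,logbsize=256k,inode64,noquota \
 *		/dev/sdb1 /mnt
 */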
  98static const struct fs_parameter_spec xfs_fs_parameters[] = {
  99	fsparam_u32("logbufs",		Opt_logbufs),
 100	fsparam_string("logbsize",	Opt_logbsize),
 101	fsparam_string("logdev",	Opt_logdev),
 102	fsparam_string("rtdev",		Opt_rtdev),
 103	fsparam_flag("wsync",		Opt_wsync),
 104	fsparam_flag("noalign",		Opt_noalign),
 105	fsparam_flag("swalloc",		Opt_swalloc),
 106	fsparam_u32("sunit",		Opt_sunit),
 107	fsparam_u32("swidth",		Opt_swidth),
 108	fsparam_flag("nouuid",		Opt_nouuid),
 109	fsparam_flag("grpid",		Opt_grpid),
 110	fsparam_flag("nogrpid",		Opt_nogrpid),
 111	fsparam_flag("bsdgroups",	Opt_bsdgroups),
 112	fsparam_flag("sysvgroups",	Opt_sysvgroups),
 113	fsparam_string("allocsize",	Opt_allocsize),
 114	fsparam_flag("norecovery",	Opt_norecovery),
 115	fsparam_flag("inode64",		Opt_inode64),
 116	fsparam_flag("inode32",		Opt_inode32),
 117	fsparam_flag("ikeep",		Opt_ikeep),
 118	fsparam_flag("noikeep",		Opt_noikeep),
 119	fsparam_flag("largeio",		Opt_largeio),
 120	fsparam_flag("nolargeio",	Opt_nolargeio),
 121	fsparam_flag("attr2",		Opt_attr2),
 122	fsparam_flag("noattr2",		Opt_noattr2),
 123	fsparam_flag("filestreams",	Opt_filestreams),
 124	fsparam_flag("quota",		Opt_quota),
 125	fsparam_flag("noquota",		Opt_noquota),
 126	fsparam_flag("usrquota",	Opt_usrquota),
 127	fsparam_flag("grpquota",	Opt_grpquota),
 128	fsparam_flag("prjquota",	Opt_prjquota),
 129	fsparam_flag("uquota",		Opt_uquota),
 130	fsparam_flag("gquota",		Opt_gquota),
 131	fsparam_flag("pquota",		Opt_pquota),
 132	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
 133	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
 134	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
 135	fsparam_flag("qnoenforce",	Opt_qnoenforce),
 136	fsparam_flag("discard",		Opt_discard),
 137	fsparam_flag("nodiscard",	Opt_nodiscard),
 138	fsparam_flag("dax",		Opt_dax),
 139	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
 140	{}
 141};
 142
 143struct proc_xfs_info {
 144	uint64_t	flag;
 145	char		*str;
 146};
 147
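/*
 * Reconstruct the option string shown in /proc/mounts from the mount
 * flags and fields.  A typical (illustrative) line:
 *
 *	/dev/sdb1 /mnt xfs rw,attr2,inode64,logbufs=8,noquota 0 0
 */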
 148static int
 149xfs_fs_show_options(
 150	struct seq_file		*m,
 151	struct dentry		*root)
 152{
 153	static struct proc_xfs_info xfs_info_set[] = {
 154		/* the few simple ones we can get from the mount struct */
 155		{ XFS_MOUNT_IKEEP,		",ikeep" },
 156		{ XFS_MOUNT_WSYNC,		",wsync" },
 157		{ XFS_MOUNT_NOALIGN,		",noalign" },
 158		{ XFS_MOUNT_SWALLOC,		",swalloc" },
 159		{ XFS_MOUNT_NOUUID,		",nouuid" },
 160		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
 161		{ XFS_MOUNT_ATTR2,		",attr2" },
 162		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
 163		{ XFS_MOUNT_GRPID,		",grpid" },
 164		{ XFS_MOUNT_DISCARD,		",discard" },
 165		{ XFS_MOUNT_LARGEIO,		",largeio" },
 166		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
 167		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
 168		{ 0, NULL }
 169	};
 170	struct xfs_mount	*mp = XFS_M(root->d_sb);
 171	struct proc_xfs_info	*xfs_infop;
 172
 173	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
 174		if (mp->m_flags & xfs_infop->flag)
 175			seq_puts(m, xfs_infop->str);
 176	}
 177
 178	seq_printf(m, ",inode%d",
 179		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
 180
 181	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
 182		seq_printf(m, ",allocsize=%dk",
 183			   (1 << mp->m_allocsize_log) >> 10);
 184
 185	if (mp->m_logbufs > 0)
 186		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
 187	if (mp->m_logbsize > 0)
 188		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
 189
 190	if (mp->m_logname)
 191		seq_show_option(m, "logdev", mp->m_logname);
 192	if (mp->m_rtname)
 193		seq_show_option(m, "rtdev", mp->m_rtname);
 194
 195	if (mp->m_dalign > 0)
 196		seq_printf(m, ",sunit=%d",
 197				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
 198	if (mp->m_swidth > 0)
 199		seq_printf(m, ",swidth=%d",
 200				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 201
 202	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
 203		seq_puts(m, ",usrquota");
 204	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
 205		seq_puts(m, ",uqnoenforce");
 206
 207	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
 208		if (mp->m_qflags & XFS_PQUOTA_ENFD)
 209			seq_puts(m, ",prjquota");
 210		else
 211			seq_puts(m, ",pqnoenforce");
 212	}
 213	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
 214		if (mp->m_qflags & XFS_GQUOTA_ENFD)
 215			seq_puts(m, ",grpquota");
 216		else
 217			seq_puts(m, ",gqnoenforce");
 218	}
 219
 220	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
 221		seq_puts(m, ",noquota");
 222
 223	return 0;
 224}
 225
 226/*
 227 * Set parameters for inode allocation heuristics, taking into account
 228 * filesystem size and inode32/inode64 mount options; i.e. specifically
 229 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 230 *
 231 * Inode allocation patterns are altered only if inode32 is requested
 232 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 233 * If altered, XFS_MOUNT_32BITINODES is set as well.
 234 *
 235 * An agcount independent of that in the mount structure is provided
 236 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 237 * to the potentially higher ag count.
 238 *
 239 * Returns the maximum AG index which may contain inodes.
 240 */
 241xfs_agnumber_t
 242xfs_set_inode_alloc(
 243	struct xfs_mount *mp,
 244	xfs_agnumber_t	agcount)
 245{
 246	xfs_agnumber_t	index;
 247	xfs_agnumber_t	maxagi = 0;
 248	xfs_sb_t	*sbp = &mp->m_sb;
 249	xfs_agnumber_t	max_metadata;
 250	xfs_agino_t	agino;
 251	xfs_ino_t	ino;
 252
 253	/*
 254	 * Calculate how much should be reserved for inodes to meet
 255	 * the max inode percentage.  Used only for inode32.
 256	 */
 257	if (M_IGEO(mp)->maxicount) {
 258		uint64_t	icount;
 259
 260		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 261		do_div(icount, 100);
 262		icount += sbp->sb_agblocks - 1;
 263		do_div(icount, sbp->sb_agblocks);
 264		max_metadata = icount;
 265	} else {
 266		max_metadata = agcount;
 267	}
 268
 269	/* Get the last possible inode in the filesystem */
 270	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
 271	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 272
 273	/*
 274	 * If user asked for no more than 32-bit inodes, and the fs is
 275	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
 276	 * the allocator to accommodate the request.
 277	 */
 278	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
 279		mp->m_flags |= XFS_MOUNT_32BITINODES;
 280	else
 281		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 282
 283	for (index = 0; index < agcount; index++) {
 284		struct xfs_perag	*pag;
 285
 286		ino = XFS_AGINO_TO_INO(mp, index, agino);
 287
 288		pag = xfs_perag_get(mp, index);
 289
 290		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
 291			if (ino > XFS_MAXINUMBER_32) {
 292				pag->pagi_inodeok = 0;
 293				pag->pagf_metadata = 0;
 294			} else {
 295				pag->pagi_inodeok = 1;
 296				maxagi++;
 297				if (index < max_metadata)
 298					pag->pagf_metadata = 1;
 299				else
 300					pag->pagf_metadata = 0;
 301			}
 302		} else {
 303			pag->pagi_inodeok = 1;
 304			pag->pagf_metadata = 0;
 305		}
 306
 307		xfs_perag_put(pag);
 308	}
 309
 310	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
 311}
 312
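/*
 * Open a named block device exclusively for this mount; the FMODE_EXCL
 * holder is the xfs_mount itself, so a conflicting exclusive open of
 * the same device fails with -EBUSY.
 */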
 313STATIC int
 314xfs_blkdev_get(
 315	xfs_mount_t		*mp,
 316	const char		*name,
 317	struct block_device	**bdevp)
 318{
 319	int			error = 0;
 320
 321	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 322				    mp);
 323	if (IS_ERR(*bdevp)) {
 324		error = PTR_ERR(*bdevp);
 325		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 326	}
 327
 328	return error;
 329}
 330
 331STATIC void
 332xfs_blkdev_put(
 333	struct block_device	*bdev)
 334{
 335	if (bdev)
 336		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 337}
 338
 339void
 340xfs_blkdev_issue_flush(
 341	xfs_buftarg_t		*buftarg)
 342{
 343	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
 344}
 345
 346STATIC void
 347xfs_close_devices(
 348	struct xfs_mount	*mp)
 349{
 350	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
 351
 352	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 353		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
 354		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
 355
 356		xfs_free_buftarg(mp->m_logdev_targp);
 357		xfs_blkdev_put(logdev);
 358		fs_put_dax(dax_logdev);
 359	}
 360	if (mp->m_rtdev_targp) {
 361		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
 362		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
 363
 364		xfs_free_buftarg(mp->m_rtdev_targp);
 365		xfs_blkdev_put(rtdev);
 366		fs_put_dax(dax_rtdev);
 367	}
 368	xfs_free_buftarg(mp->m_ddev_targp);
 369	fs_put_dax(dax_ddev);
 370}
 371
 372/*
 373 * The file system configurations are:
 374 *	(1) device (partition) with data and internal log
 375 *	(2) logical volume with data and log subvolumes.
 376 *	(3) logical volume with data, log, and realtime subvolumes.
 377 *
 378 * We only have to handle opening the log and realtime volumes here if
 379 * they are present.  The data subvolume has already been opened by
 380 * get_sb_bdev() and is stored in sb->s_bdev.
 381 */
 382STATIC int
 383xfs_open_devices(
 384	struct xfs_mount	*mp)
 385{
 386	struct block_device	*ddev = mp->m_super->s_bdev;
 387	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
 388	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
 389	struct block_device	*logdev = NULL, *rtdev = NULL;
 390	int			error;
 391
 392	/*
 393	 * Open real time and log devices - order is important.
 394	 */
 395	if (mp->m_logname) {
 396		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
 397		if (error)
 398			goto out;
 399		dax_logdev = fs_dax_get_by_bdev(logdev);
 400	}
 401
 402	if (mp->m_rtname) {
 403		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
 404		if (error)
 405			goto out_close_logdev;
 406
 407		if (rtdev == ddev || rtdev == logdev) {
 408			xfs_warn(mp,
 409	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 410			error = -EINVAL;
 411			goto out_close_rtdev;
 412		}
 413		dax_rtdev = fs_dax_get_by_bdev(rtdev);
 414	}
 415
 416	/*
 417	 * Setup xfs_mount buffer target pointers
 418	 */
 419	error = -ENOMEM;
 420	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
 421	if (!mp->m_ddev_targp)
 422		goto out_close_rtdev;
 423
 424	if (rtdev) {
 425		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
 426		if (!mp->m_rtdev_targp)
 427			goto out_free_ddev_targ;
 428	}
 429
 430	if (logdev && logdev != ddev) {
 431		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
 432		if (!mp->m_logdev_targp)
 433			goto out_free_rtdev_targ;
 434	} else {
 435		mp->m_logdev_targp = mp->m_ddev_targp;
 436	}
 437
 438	return 0;
 439
 440 out_free_rtdev_targ:
 441	if (mp->m_rtdev_targp)
 442		xfs_free_buftarg(mp->m_rtdev_targp);
 443 out_free_ddev_targ:
 444	xfs_free_buftarg(mp->m_ddev_targp);
 445 out_close_rtdev:
 446	xfs_blkdev_put(rtdev);
 447	fs_put_dax(dax_rtdev);
 448 out_close_logdev:
 449	if (logdev && logdev != ddev) {
 450		xfs_blkdev_put(logdev);
 451		fs_put_dax(dax_logdev);
 452	}
 453 out:
 454	fs_put_dax(dax_ddev);
 455	return error;
 456}
 457
 458/*
 459 * Setup xfs_mount buffer target pointers based on superblock
 460 */
 461STATIC int
 462xfs_setup_devices(
 463	struct xfs_mount	*mp)
 464{
 465	int			error;
 466
 467	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
 468	if (error)
 469		return error;
 470
 471	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 472		unsigned int	log_sector_size = BBSIZE;
 473
 474		if (xfs_sb_version_hassector(&mp->m_sb))
 475			log_sector_size = mp->m_sb.sb_logsectsize;
 476		error = xfs_setsize_buftarg(mp->m_logdev_targp,
 477					    log_sector_size);
 478		if (error)
 479			return error;
 480	}
 481	if (mp->m_rtdev_targp) {
 482		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
 483					    mp->m_sb.sb_sectsize);
 484		if (error)
 485			return error;
 486	}
 487
 488	return 0;
 489}
 490
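/*
 * Allocate the per-mount workqueues.  Most carry WQ_MEM_RECLAIM so
 * they can make forward progress under memory pressure, and all are
 * WQ_FREEZABLE so the freezer can drain them; the super block id
 * (s_id) in the name keeps each mount's queues distinguishable.
 */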
 491STATIC int
 492xfs_init_mount_workqueues(
 493	struct xfs_mount	*mp)
 494{
 495	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
 496			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
 497	if (!mp->m_buf_workqueue)
 498		goto out;
 499
 500	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
 501			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
 502	if (!mp->m_unwritten_workqueue)
 503		goto out_destroy_buf;
 504
 505	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
 506			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
 507			0, mp->m_super->s_id);
 508	if (!mp->m_cil_workqueue)
 509		goto out_destroy_unwritten;
 510
 511	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
 512			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
 513	if (!mp->m_reclaim_workqueue)
 514		goto out_destroy_cil;
 515
 516	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
 517			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
 518	if (!mp->m_eofblocks_workqueue)
 519		goto out_destroy_reclaim;
 520
 521	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
 522					       mp->m_super->s_id);
 523	if (!mp->m_sync_workqueue)
 524		goto out_destroy_eofb;
 525
 526	return 0;
 527
 528out_destroy_eofb:
 529	destroy_workqueue(mp->m_eofblocks_workqueue);
 530out_destroy_reclaim:
 531	destroy_workqueue(mp->m_reclaim_workqueue);
 532out_destroy_cil:
 533	destroy_workqueue(mp->m_cil_workqueue);
 534out_destroy_unwritten:
 535	destroy_workqueue(mp->m_unwritten_workqueue);
 536out_destroy_buf:
 537	destroy_workqueue(mp->m_buf_workqueue);
 538out:
 539	return -ENOMEM;
 540}
 541
 542STATIC void
 543xfs_destroy_mount_workqueues(
 544	struct xfs_mount	*mp)
 545{
 546	destroy_workqueue(mp->m_sync_workqueue);
 547	destroy_workqueue(mp->m_eofblocks_workqueue);
 548	destroy_workqueue(mp->m_reclaim_workqueue);
 549	destroy_workqueue(mp->m_cil_workqueue);
 550	destroy_workqueue(mp->m_unwritten_workqueue);
 551	destroy_workqueue(mp->m_buf_workqueue);
 552}
 553
 554static void
 555xfs_flush_inodes_worker(
 556	struct work_struct	*work)
 557{
 558	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
 559						   m_flush_inodes_work);
 560	struct super_block	*sb = mp->m_super;
 561
 562	if (down_read_trylock(&sb->s_umount)) {
 563		sync_inodes_sb(sb);
 564		up_read(&sb->s_umount);
 565	}
 566}
 567
 568/*
 569 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 570 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 571 * for IO to complete so that we effectively throttle multiple callers to the
 572 * rate at which IO is completing.
 573 */
 574void
 575xfs_flush_inodes(
 576	struct xfs_mount	*mp)
 577{
 578	/*
 579	 * If flush_work() returns true then that means we waited for a flush
 580	 * which was already in progress.  Don't bother running another scan.
 581	 */
 582	if (flush_work(&mp->m_flush_inodes_work))
 583		return;
 584
 585	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
 586	flush_work(&mp->m_flush_inodes_work);
 587}
 588
 589/* Catch misguided souls that try to use this interface on XFS */
 590STATIC struct inode *
 591xfs_fs_alloc_inode(
 592	struct super_block	*sb)
 593{
 594	BUG();
 595	return NULL;
 596}
 597
 598#ifdef DEBUG
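/*
 * DEBUG-only helper: walk one fork's extent list and complain about
 * any delalloc extents still present when the inode is torn down.
 */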
 599static void
 600xfs_check_delalloc(
 601	struct xfs_inode	*ip,
 602	int			whichfork)
 603{
 604	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 605	struct xfs_bmbt_irec	got;
 606	struct xfs_iext_cursor	icur;
 607
 608	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
 609		return;
 610	do {
 611		if (isnullstartblock(got.br_startblock)) {
 612			xfs_warn(ip->i_mount,
 613	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
 614				ip->i_ino,
 615				whichfork == XFS_DATA_FORK ? "data" : "cow",
 616				got.br_startoff, got.br_blockcount);
 617		}
 618	} while (xfs_iext_next_extent(ifp, &icur, &got));
 619}
 620#else
 621#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
 622#endif
 623
 624/*
 625 * Now that the generic code is guaranteed not to be accessing
 626 * the linux inode, we can inactivate and reclaim the inode.
 627 */
 628STATIC void
 629xfs_fs_destroy_inode(
 630	struct inode		*inode)
 631{
 632	struct xfs_inode	*ip = XFS_I(inode);
 633
 634	trace_xfs_destroy_inode(ip);
 635
 636	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 637	XFS_STATS_INC(ip->i_mount, vn_rele);
 638	XFS_STATS_INC(ip->i_mount, vn_remove);
 639
 640	xfs_inactive(ip);
 641
 642	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
 643		xfs_check_delalloc(ip, XFS_DATA_FORK);
 644		xfs_check_delalloc(ip, XFS_COW_FORK);
 645		ASSERT(0);
 646	}
 647
 648	XFS_STATS_INC(ip->i_mount, vn_reclaim);
 649
 650	/*
 651	 * We should never get here with one of the reclaim flags already set.
 652	 */
 653	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
 654	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
 655
 656	/*
 657	 * We always use background reclaim here because even if the
 658	 * inode is clean, it still may be under IO and hence we have
 659	 * to take the flush lock. The background reclaim path handles
 660	 * this more efficiently than we can here, so simply let background
 661	 * reclaim tear down all inodes.
 662	 */
 663	xfs_inode_set_reclaim_tag(ip);
 664}
 665
 666static void
 667xfs_fs_dirty_inode(
 668	struct inode			*inode,
 669	int				flag)
 670{
 671	struct xfs_inode		*ip = XFS_I(inode);
 672	struct xfs_mount		*mp = ip->i_mount;
 673	struct xfs_trans		*tp;
 674
 675	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
 676		return;
 677	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
 678		return;
 679
 680	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
 681		return;
 682	xfs_ilock(ip, XFS_ILOCK_EXCL);
 683	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 684	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
 685	xfs_trans_commit(tp);
 686}
 687
 688/*
 689 * Slab object creation initialisation for the XFS inode.
 690 * This covers only the idempotent fields in the XFS inode;
 691 * all other fields need to be initialised on allocation
 692 * from the slab. This avoids the need to repeatedly initialise
693 * fields in the xfs inode that are left in the initialised state
 694 * when freeing the inode.
 695 */
 696STATIC void
 697xfs_fs_inode_init_once(
 698	void			*inode)
 699{
 700	struct xfs_inode	*ip = inode;
 701
 702	memset(ip, 0, sizeof(struct xfs_inode));
 703
 704	/* vfs inode */
 705	inode_init_once(VFS_I(ip));
 706
 707	/* xfs inode */
 708	atomic_set(&ip->i_pincount, 0);
 709	spin_lock_init(&ip->i_flags_lock);
 710
 711	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 712		     "xfsino", ip->i_ino);
 713	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 714		     "xfsino", ip->i_ino);
 715}
 716
 717/*
 718 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 719 * serialised against cache hits here via the inode->i_lock and igrab() in
 720 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 721 * racing with us, and it avoids needing to grab a spinlock here for every inode
 722 * we drop the final reference on.
 723 */
 724STATIC int
 725xfs_fs_drop_inode(
 726	struct inode		*inode)
 727{
 728	struct xfs_inode	*ip = XFS_I(inode);
 729
 730	/*
 731	 * If this unlinked inode is in the middle of recovery, don't
 732	 * drop the inode just yet; log recovery will take care of
 733	 * that.  See the comment for this inode flag.
 734	 */
 735	if (ip->i_flags & XFS_IRECOVERY) {
 736		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
 737		return 0;
 738	}
 739
 740	return generic_drop_inode(inode);
 741}
 742
 743static void
 744xfs_mount_free(
 745	struct xfs_mount	*mp)
 746{
 747	kfree(mp->m_rtname);
 748	kfree(mp->m_logname);
 749	kmem_free(mp);
 750}
 751
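/*
 * Sync the filesystem for sync(2)/syncfs(2).  Data writeback has
 * already been handled by the VFS; what remains is forcing the log to
 * stable storage.
 */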
 752STATIC int
 753xfs_fs_sync_fs(
 754	struct super_block	*sb,
 755	int			wait)
 756{
 757	struct xfs_mount	*mp = XFS_M(sb);
 758
 759	/*
 760	 * Doing anything during the async pass would be counterproductive.
 761	 */
 762	if (!wait)
 763		return 0;
 764
 765	xfs_log_force(mp, XFS_LOG_SYNC);
 766	if (laptop_mode) {
 767		/*
 768		 * The disk must be active because we're syncing.
 769		 * We schedule log work now (now that the disk is
 770		 * active) instead of later (when it might not be).
 771		 */
 772		flush_delayed_work(&mp->m_log->l_work);
 773	}
 774
 775	return 0;
 776}
 777
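/*
 * Fill in the statfs(2) buffer.  Free space and inode counts are
 * summed from the per-cpu counters; project-quota limits and realtime
 * inodes override the generic block counts further down.
 */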
 778STATIC int
 779xfs_fs_statfs(
 780	struct dentry		*dentry,
 781	struct kstatfs		*statp)
 782{
 783	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
 784	xfs_sb_t		*sbp = &mp->m_sb;
 785	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 786	uint64_t		fakeinos, id;
 787	uint64_t		icount;
 788	uint64_t		ifree;
 789	uint64_t		fdblocks;
 790	xfs_extlen_t		lsize;
 791	int64_t			ffree;
 792
 793	statp->f_type = XFS_SUPER_MAGIC;
 794	statp->f_namelen = MAXNAMELEN - 1;
 795
 796	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
 797	statp->f_fsid.val[0] = (u32)id;
 798	statp->f_fsid.val[1] = (u32)(id >> 32);
 799
 800	icount = percpu_counter_sum(&mp->m_icount);
 801	ifree = percpu_counter_sum(&mp->m_ifree);
 802	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
 803
 804	spin_lock(&mp->m_sb_lock);
 805	statp->f_bsize = sbp->sb_blocksize;
 806	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 807	statp->f_blocks = sbp->sb_dblocks - lsize;
 808	spin_unlock(&mp->m_sb_lock);
 809
 810	/* make sure statp->f_bfree does not underflow */
 811	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
 812	statp->f_bavail = statp->f_bfree;
 813
 814	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
 815	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
 816	if (M_IGEO(mp)->maxicount)
 817		statp->f_files = min_t(typeof(statp->f_files),
 818					statp->f_files,
 819					M_IGEO(mp)->maxicount);
 820
 821	/* If sb_icount overshot maxicount, report actual allocation */
 822	statp->f_files = max_t(typeof(statp->f_files),
 823					statp->f_files,
 824					sbp->sb_icount);
 825
 826	/* make sure statp->f_ffree does not underflow */
 827	ffree = statp->f_files - (icount - ifree);
 828	statp->f_ffree = max_t(int64_t, ffree, 0);
 829
 830
 831	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
 832	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
 833			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
 834		xfs_qm_statvfs(ip, statp);
 835
 836	if (XFS_IS_REALTIME_MOUNT(mp) &&
 837	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
 838		statp->f_blocks = sbp->sb_rblocks;
 839		statp->f_bavail = statp->f_bfree =
 840			sbp->sb_frextents * sbp->sb_rextsize;
 841	}
 842
 843	return 0;
 844}
 845
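/*
 * Stash the reserve block pool size and drain the pool to zero;
 * xfs_restore_resvblks() below performs the inverse on unfreeze or a
 * ro->rw remount.
 */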
 846STATIC void
 847xfs_save_resvblks(struct xfs_mount *mp)
 848{
 849	uint64_t resblks = 0;
 850
 851	mp->m_resblks_save = mp->m_resblks;
 852	xfs_reserve_blocks(mp, &resblks, NULL);
 853}
 854
 855STATIC void
 856xfs_restore_resvblks(struct xfs_mount *mp)
 857{
 858	uint64_t resblks;
 859
 860	if (mp->m_resblks_save) {
 861		resblks = mp->m_resblks_save;
 862		mp->m_resblks_save = 0;
 863	} else
 864		resblks = xfs_default_resblks(mp);
 865
 866	xfs_reserve_blocks(mp, &resblks, NULL);
 867}
 868
 869/*
 870 * Trigger writeback of all the dirty metadata in the file system.
 871 *
 872 * This ensures that the metadata is written to their location on disk rather
 873 * than just existing in transactions in the log. This means after a quiesce
 874 * there is no log replay required to write the inodes to disk - this is the
 875 * primary difference between a sync and a quiesce.
 876 *
 877 * We cancel log work early here to ensure all transactions the log worker may
 878 * run have finished before we clean up and log the superblock and write an
 879 * unmount record. The unfreeze process is responsible for restarting the log
 880 * worker correctly.
 881 */
 882void
 883xfs_quiesce_attr(
 884	struct xfs_mount	*mp)
 885{
 886	int	error = 0;
 887
 888	cancel_delayed_work_sync(&mp->m_log->l_work);
 889
 890	/* force the log to unpin objects from the now complete transactions */
 891	xfs_log_force(mp, XFS_LOG_SYNC);
 892
 893
 894	/* Push the superblock and write an unmount record */
 895	error = xfs_log_sbcount(mp);
 896	if (error)
897		xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
 898				"Frozen image may not be consistent.");
 899	xfs_log_quiesce(mp);
 900}
 901
 902/*
 903 * Second stage of a freeze. The data is already frozen so we only
 904 * need to take care of the metadata. Once that's done sync the superblock
 905 * to the log to dirty it in case of a crash while frozen. This ensures that we
 906 * will recover the unlinked inode lists on the next mount.
 907 */
 908STATIC int
 909xfs_fs_freeze(
 910	struct super_block	*sb)
 911{
 912	struct xfs_mount	*mp = XFS_M(sb);
 913	unsigned int		flags;
 914	int			ret;
 915
 916	/*
 917	 * The filesystem is now frozen far enough that memory reclaim
 918	 * cannot safely operate on the filesystem. Hence we need to
 919	 * set a GFP_NOFS context here to avoid recursion deadlocks.
 920	 */
 921	flags = memalloc_nofs_save();
 922	xfs_stop_block_reaping(mp);
 923	xfs_save_resvblks(mp);
 924	xfs_quiesce_attr(mp);
 925	ret = xfs_sync_sb(mp, true);
 926	memalloc_nofs_restore(flags);
 927	return ret;
 928}
 929
 930STATIC int
 931xfs_fs_unfreeze(
 932	struct super_block	*sb)
 933{
 934	struct xfs_mount	*mp = XFS_M(sb);
 935
 936	xfs_restore_resvblks(mp);
 937	xfs_log_work_queue(mp);
 938	xfs_start_block_reaping(mp);
 939	return 0;
 940}
 941
 942/*
 943 * This function fills in xfs_mount_t fields based on mount args.
 944 * Note: the superblock _has_ now been read in.
 945 */
 946STATIC int
 947xfs_finish_flags(
 948	struct xfs_mount	*mp)
 949{
 950	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
 951
 952	/* Fail a mount where the logbuf is smaller than the log stripe */
 953	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
 954		if (mp->m_logbsize <= 0 &&
 955		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
 956			mp->m_logbsize = mp->m_sb.sb_logsunit;
 957		} else if (mp->m_logbsize > 0 &&
 958			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
 959			xfs_warn(mp,
 960		"logbuf size must be greater than or equal to log stripe size");
 961			return -EINVAL;
 962		}
 963	} else {
 964		/* Fail a mount if the logbuf is larger than 32K */
 965		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
 966			xfs_warn(mp,
 967		"logbuf size for version 1 logs must be 16K or 32K");
 968			return -EINVAL;
 969		}
 970	}
 971
 972	/*
 973	 * V5 filesystems always use attr2 format for attributes.
 974	 */
 975	if (xfs_sb_version_hascrc(&mp->m_sb) &&
 976	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
 977		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
 978			     "attr2 is always enabled for V5 filesystems.");
 979		return -EINVAL;
 980	}
 981
 982	/*
 983	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
 984	 * told by noattr2 to turn it off
 985	 */
 986	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
 987	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
 988		mp->m_flags |= XFS_MOUNT_ATTR2;
 989
 990	/*
 991	 * prohibit r/w mounts of read-only filesystems
 992	 */
 993	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
 994		xfs_warn(mp,
 995			"cannot mount a read-only filesystem as read-write");
 996		return -EROFS;
 997	}
 998
 999	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
1000	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
1001	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
1002		xfs_warn(mp,
1003		  "Super block does not support project and group quota together");
1004		return -EINVAL;
1005	}
1006
1007	return 0;
1008}
1009
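/*
 * Set up the per-cpu counters.  They start at zero here and are seeded
 * from the on-disk superblock via xfs_reinit_percpu_counters() once it
 * has been read.
 */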
1010static int
1011xfs_init_percpu_counters(
1012	struct xfs_mount	*mp)
1013{
1014	int		error;
1015
1016	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1017	if (error)
1018		return -ENOMEM;
1019
1020	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1021	if (error)
1022		goto free_icount;
1023
1024	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1025	if (error)
1026		goto free_ifree;
1027
1028	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1029	if (error)
1030		goto free_fdblocks;
1031
1032	return 0;
1033
1034free_fdblocks:
1035	percpu_counter_destroy(&mp->m_fdblocks);
1036free_ifree:
1037	percpu_counter_destroy(&mp->m_ifree);
1038free_icount:
1039	percpu_counter_destroy(&mp->m_icount);
1040	return -ENOMEM;
1041}
1042
1043void
1044xfs_reinit_percpu_counters(
1045	struct xfs_mount	*mp)
1046{
1047	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1048	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1049	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1050}
1051
1052static void
1053xfs_destroy_percpu_counters(
1054	struct xfs_mount	*mp)
1055{
1056	percpu_counter_destroy(&mp->m_icount);
1057	percpu_counter_destroy(&mp->m_ifree);
1058	percpu_counter_destroy(&mp->m_fdblocks);
1059	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1060	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1061	percpu_counter_destroy(&mp->m_delalloc_blks);
1062}
1063
1064static void
1065xfs_fs_put_super(
1066	struct super_block	*sb)
1067{
1068	struct xfs_mount	*mp = XFS_M(sb);
1069
1070	/* if ->fill_super failed, we have no mount to tear down */
1071	if (!sb->s_fs_info)
1072		return;
1073
1074	xfs_notice(mp, "Unmounting Filesystem");
1075	xfs_filestream_unmount(mp);
1076	xfs_unmountfs(mp);
1077
1078	xfs_freesb(mp);
1079	free_percpu(mp->m_stats.xs_stats);
1080	xfs_destroy_percpu_counters(mp);
1081	xfs_destroy_mount_workqueues(mp);
1082	xfs_close_devices(mp);
1083
1084	sb->s_fs_info = NULL;
1085	xfs_mount_free(mp);
1086}
1087
1088static long
1089xfs_fs_nr_cached_objects(
1090	struct super_block	*sb,
1091	struct shrink_control	*sc)
 
1092{
1093	/* Paranoia: catch incorrect calls during mount setup or teardown */
1094	if (WARN_ON_ONCE(!sb->s_fs_info))
1095		return 0;
1096	return xfs_reclaim_inodes_count(XFS_M(sb));
1097}
1098
1099static long
1100xfs_fs_free_cached_objects(
1101	struct super_block	*sb,
1102	struct shrink_control	*sc)
1103{
1104	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1105}
1106
1107static const struct super_operations xfs_super_operations = {
1108	.alloc_inode		= xfs_fs_alloc_inode,
1109	.destroy_inode		= xfs_fs_destroy_inode,
1110	.dirty_inode		= xfs_fs_dirty_inode,
1111	.drop_inode		= xfs_fs_drop_inode,
1112	.put_super		= xfs_fs_put_super,
1113	.sync_fs		= xfs_fs_sync_fs,
1114	.freeze_fs		= xfs_fs_freeze,
1115	.unfreeze_fs		= xfs_fs_unfreeze,
1116	.statfs			= xfs_fs_statfs,
1117	.show_options		= xfs_fs_show_options,
1118	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1119	.free_cached_objects	= xfs_fs_free_cached_objects,
1120};
1121
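/*
 * Parse an integer with an optional binary K/M/G suffix: the suffix is
 * stripped and the parsed value shifted left by 10, 20 or 30 bits, so
 * e.g. "64k" yields 65536.
 */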
1122static int
1123suffix_kstrtoint(
1124	const char	*s,
1125	unsigned int	base,
1126	int		*res)
1127{
1128	int		last, shift_left_factor = 0, _res = 0;
1129	char		*value;
1130	int		ret = 0;
1131
1132	value = kstrdup(s, GFP_KERNEL);
1133	if (!value)
1134		return -ENOMEM;
1135
1136	last = strlen(value) - 1;
1137	if (value[last] == 'K' || value[last] == 'k') {
1138		shift_left_factor = 10;
1139		value[last] = '\0';
1140	}
1141	if (value[last] == 'M' || value[last] == 'm') {
1142		shift_left_factor = 20;
1143		value[last] = '\0';
1144	}
1145	if (value[last] == 'G' || value[last] == 'g') {
1146		shift_left_factor = 30;
1147		value[last] = '\0';
1148	}
1149
1150	if (kstrtoint(value, base, &_res))
1151		ret = -EINVAL;
1152	kfree(value);
1153	*res = _res << shift_left_factor;
1154	return ret;
1155}
1156
1157/*
1158 * Set mount state from a mount option.
1159 *
1160 * NOTE: mp->m_super is NULL here!
1161 */
1162static int
1163xfs_fc_parse_param(
1164	struct fs_context	*fc,
1165	struct fs_parameter	*param)
1166{
1167	struct xfs_mount	*mp = fc->s_fs_info;
1168	struct fs_parse_result	result;
1169	int			size = 0;
1170	int			opt;
1171
1172	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1173	if (opt < 0)
1174		return opt;
1175
1176	switch (opt) {
1177	case Opt_logbufs:
1178		mp->m_logbufs = result.uint_32;
1179		return 0;
1180	case Opt_logbsize:
1181		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
1182			return -EINVAL;
1183		return 0;
1184	case Opt_logdev:
1185		kfree(mp->m_logname);
1186		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1187		if (!mp->m_logname)
1188			return -ENOMEM;
1189		return 0;
1190	case Opt_rtdev:
1191		kfree(mp->m_rtname);
1192		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1193		if (!mp->m_rtname)
1194			return -ENOMEM;
1195		return 0;
1196	case Opt_allocsize:
1197		if (suffix_kstrtoint(param->string, 10, &size))
1198			return -EINVAL;
1199		mp->m_allocsize_log = ffs(size) - 1;
1200		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1201		return 0;
1202	case Opt_grpid:
1203	case Opt_bsdgroups:
1204		mp->m_flags |= XFS_MOUNT_GRPID;
1205		return 0;
1206	case Opt_nogrpid:
1207	case Opt_sysvgroups:
1208		mp->m_flags &= ~XFS_MOUNT_GRPID;
1209		return 0;
1210	case Opt_wsync:
1211		mp->m_flags |= XFS_MOUNT_WSYNC;
1212		return 0;
1213	case Opt_norecovery:
1214		mp->m_flags |= XFS_MOUNT_NORECOVERY;
1215		return 0;
1216	case Opt_noalign:
1217		mp->m_flags |= XFS_MOUNT_NOALIGN;
1218		return 0;
1219	case Opt_swalloc:
1220		mp->m_flags |= XFS_MOUNT_SWALLOC;
1221		return 0;
1222	case Opt_sunit:
1223		mp->m_dalign = result.uint_32;
1224		return 0;
1225	case Opt_swidth:
1226		mp->m_swidth = result.uint_32;
1227		return 0;
1228	case Opt_inode32:
1229		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1230		return 0;
1231	case Opt_inode64:
1232		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1233		return 0;
1234	case Opt_nouuid:
1235		mp->m_flags |= XFS_MOUNT_NOUUID;
1236		return 0;
1237	case Opt_ikeep:
1238		mp->m_flags |= XFS_MOUNT_IKEEP;
1239		return 0;
1240	case Opt_noikeep:
1241		mp->m_flags &= ~XFS_MOUNT_IKEEP;
1242		return 0;
1243	case Opt_largeio:
1244		mp->m_flags |= XFS_MOUNT_LARGEIO;
1245		return 0;
1246	case Opt_nolargeio:
1247		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1248		return 0;
1249	case Opt_attr2:
1250		mp->m_flags |= XFS_MOUNT_ATTR2;
1251		return 0;
1252	case Opt_noattr2:
1253		mp->m_flags &= ~XFS_MOUNT_ATTR2;
1254		mp->m_flags |= XFS_MOUNT_NOATTR2;
1255		return 0;
1256	case Opt_filestreams:
1257		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1258		return 0;
1259	case Opt_noquota:
1260		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1261		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1262		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1263		return 0;
1264	case Opt_quota:
1265	case Opt_uquota:
1266	case Opt_usrquota:
1267		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1268				 XFS_UQUOTA_ENFD);
1269		return 0;
1270	case Opt_qnoenforce:
1271	case Opt_uqnoenforce:
1272		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1273		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1274		return 0;
1275	case Opt_pquota:
1276	case Opt_prjquota:
1277		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1278				 XFS_PQUOTA_ENFD);
1279		return 0;
1280	case Opt_pqnoenforce:
1281		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1282		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1283		return 0;
1284	case Opt_gquota:
1285	case Opt_grpquota:
1286		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1287				 XFS_GQUOTA_ENFD);
1288		return 0;
1289	case Opt_gqnoenforce:
1290		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1291		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1292		return 0;
1293	case Opt_discard:
1294		mp->m_flags |= XFS_MOUNT_DISCARD;
1295		return 0;
1296	case Opt_nodiscard:
1297		mp->m_flags &= ~XFS_MOUNT_DISCARD;
1298		return 0;
1299#ifdef CONFIG_FS_DAX
1300	case Opt_dax:
1301		xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
1302		return 0;
1303	case Opt_dax_enum:
1304		xfs_mount_set_dax_mode(mp, result.uint_32);
1305		return 0;
1306#endif
1307	default:
1308		xfs_warn(mp, "unknown mount option [%s].", param->key);
1309		return -EINVAL;
1310	}
1311
1312	return 0;
1313}
1314
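/*
 * Cross-check the parsed mount options before they are applied:
 * norecovery requires a read-only mount, sunit/swidth must be given
 * together with swidth a multiple of sunit, and the log buffer count
 * and size must fall within the supported ranges.
 */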
1315static int
1316xfs_fc_validate_params(
1317	struct xfs_mount	*mp)
1318{
1319	/*
1320	 * no recovery flag requires a read-only mount
1321	 */
1322	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1323	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1324		xfs_warn(mp, "no-recovery mounts must be read-only.");
1325		return -EINVAL;
1326	}
1327
1328	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1329	    (mp->m_dalign || mp->m_swidth)) {
1330		xfs_warn(mp,
1331	"sunit and swidth options incompatible with the noalign option");
1332		return -EINVAL;
1333	}
1334
1335	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1336		xfs_warn(mp, "quota support not available in this kernel.");
1337		return -EINVAL;
1338	}
1339
1340	if ((mp->m_dalign && !mp->m_swidth) ||
1341	    (!mp->m_dalign && mp->m_swidth)) {
1342		xfs_warn(mp, "sunit and swidth must be specified together");
1343		return -EINVAL;
1344	}
1345
1346	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1347		xfs_warn(mp,
1348	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1349			mp->m_swidth, mp->m_dalign);
1350		return -EINVAL;
1351	}
1352
1353	if (mp->m_logbufs != -1 &&
1354	    mp->m_logbufs != 0 &&
1355	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1356	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1357		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1358			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1359		return -EINVAL;
1360	}
1361
1362	if (mp->m_logbsize != -1 &&
1363	    mp->m_logbsize !=  0 &&
1364	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1365	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1366	     !is_power_of_2(mp->m_logbsize))) {
1367		xfs_warn(mp,
1368			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1369			mp->m_logbsize);
1370		return -EINVAL;
1371	}
1372
1373	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1374	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1375	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1376		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1377			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1378		return -EINVAL;
1379	}
1380
1381	return 0;
1382}
1383
1384static int
1385xfs_fc_fill_super(
1386	struct super_block	*sb,
1387	struct fs_context	*fc)
1388{
1389	struct xfs_mount	*mp = sb->s_fs_info;
1390	struct inode		*root;
1391	int			flags = 0, error;
1392
1393	mp->m_super = sb;
1394
1395	error = xfs_fc_validate_params(mp);
1396	if (error)
1397		goto out_free_names;
1398
1399	sb_min_blocksize(sb, BBSIZE);
1400	sb->s_xattr = xfs_xattr_handlers;
1401	sb->s_export_op = &xfs_export_operations;
1402#ifdef CONFIG_XFS_QUOTA
1403	sb->s_qcop = &xfs_quotactl_operations;
1404	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1405#endif
1406	sb->s_op = &xfs_super_operations;
1407
1408	/*
1409	 * Delay mount work if the debug hook is set. This is debug
1410	 * instrumentation to coordinate simulation of xfs mount failures with
1411	 * VFS superblock operations.
1412	 */
1413	if (xfs_globals.mount_delay) {
1414		xfs_notice(mp, "Delaying mount for %d seconds.",
1415			xfs_globals.mount_delay);
1416		msleep(xfs_globals.mount_delay * 1000);
1417	}
1418
1419	if (fc->sb_flags & SB_SILENT)
1420		flags |= XFS_MFSI_QUIET;
1421
1422	error = xfs_open_devices(mp);
1423	if (error)
1424		goto out_free_names;
1425
1426	error = xfs_init_mount_workqueues(mp);
1427	if (error)
1428		goto out_close_devices;
1429
1430	error = xfs_init_percpu_counters(mp);
1431	if (error)
1432		goto out_destroy_workqueues;
1433
1434	/* Allocate stats memory before we do operations that might use it */
1435	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1436	if (!mp->m_stats.xs_stats) {
1437		error = -ENOMEM;
1438		goto out_destroy_counters;
1439	}
1440
1441	error = xfs_readsb(mp, flags);
1442	if (error)
1443		goto out_free_stats;
1444
1445	error = xfs_finish_flags(mp);
1446	if (error)
1447		goto out_free_sb;
1448
1449	error = xfs_setup_devices(mp);
1450	if (error)
1451		goto out_free_sb;
1452
1453	/*
1454	 * XFS block mappings use 54 bits to store the logical block offset.
1455	 * This should suffice to handle the maximum file size that the VFS
1456	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1457	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1458	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1459	 * to check this assertion.
1460	 *
1461	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1462	 * maximum pagecache offset in units of fs blocks.
1463	 */
1464	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
1465		xfs_warn(mp,
1466"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1467			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1468			 XFS_MAX_FILEOFF);
1469		error = -EINVAL;
1470		goto out_free_sb;
1471	}
1472
1473	error = xfs_filestream_mount(mp);
1474	if (error)
1475		goto out_free_sb;
1476
1477	/*
1478	 * we must configure the block size in the superblock before we run the
1479	 * full mount process as the mount process can lookup and cache inodes.
1480	 */
1481	sb->s_magic = XFS_SUPER_MAGIC;
1482	sb->s_blocksize = mp->m_sb.sb_blocksize;
1483	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1484	sb->s_maxbytes = MAX_LFS_FILESIZE;
1485	sb->s_max_links = XFS_MAXLINK;
1486	sb->s_time_gran = 1;
1487	sb->s_time_min = S32_MIN;
1488	sb->s_time_max = S32_MAX;
1489	sb->s_iflags |= SB_I_CGROUPWB;
1490
1491	set_posix_acl_flag(sb);
1492
1493	/* version 5 superblocks support inode version counters. */
1494	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1495		sb->s_flags |= SB_I_VERSION;
1496
1497	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1498		bool rtdev_is_dax = false, datadev_is_dax;
1499
1500		xfs_warn(mp,
1501		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1502
1503		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1504			sb->s_blocksize);
1505		if (mp->m_rtdev_targp)
1506			rtdev_is_dax = bdev_dax_supported(
1507				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1508		if (!rtdev_is_dax && !datadev_is_dax) {
1509			xfs_alert(mp,
1510			"DAX unsupported by block device. Turning off DAX.");
1511			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1512		}
1513		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1514			xfs_alert(mp,
1515		"DAX and reflink cannot be used together!");
1516			error = -EINVAL;
1517			goto out_filestream_unmount;
1518		}
1519	}
1520
1521	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1522		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1523
1524		if (!blk_queue_discard(q)) {
1525			xfs_warn(mp, "mounting with \"discard\" option, but "
1526					"the device does not support discard");
1527			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1528		}
1529	}
1530
1531	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1532		if (mp->m_sb.sb_rblocks) {
1533			xfs_alert(mp,
1534	"reflink not compatible with realtime device!");
1535			error = -EINVAL;
1536			goto out_filestream_unmount;
1537		}
1538
1539		if (xfs_globals.always_cow) {
1540			xfs_info(mp, "using DEBUG-only always_cow mode.");
1541			mp->m_always_cow = true;
1542		}
1543	}
1544
1545	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1546		xfs_alert(mp,
1547	"reverse mapping btree not compatible with realtime device!");
1548		error = -EINVAL;
1549		goto out_filestream_unmount;
1550	}
1551
1552	error = xfs_mountfs(mp);
1553	if (error)
1554		goto out_filestream_unmount;
1555
1556	root = igrab(VFS_I(mp->m_rootip));
1557	if (!root) {
1558		error = -ENOENT;
1559		goto out_unmount;
1560	}
1561	sb->s_root = d_make_root(root);
1562	if (!sb->s_root) {
1563		error = -ENOMEM;
1564		goto out_unmount;
1565	}
1566
1567	return 0;
1568
1569 out_filestream_unmount:
1570	xfs_filestream_unmount(mp);
1571 out_free_sb:
1572	xfs_freesb(mp);
1573 out_free_stats:
1574	free_percpu(mp->m_stats.xs_stats);
1575 out_destroy_counters:
1576	xfs_destroy_percpu_counters(mp);
1577 out_destroy_workqueues:
1578	xfs_destroy_mount_workqueues(mp);
1579 out_close_devices:
1580	xfs_close_devices(mp);
1581 out_free_names:
1582	sb->s_fs_info = NULL;
1583	xfs_mount_free(mp);
1584	return error;
1585
1586 out_unmount:
1587	xfs_filestream_unmount(mp);
1588	xfs_unmountfs(mp);
1589	goto out_free_sb;
1590}
1591
1592static int
1593xfs_fc_get_tree(
1594	struct fs_context	*fc)
1595{
1596	return get_tree_bdev(fc, xfs_fc_fill_super);
1597}
1598
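/*
 * ro->rw transition: refuse on norecovery mounts and on filesystems
 * with unknown ro-compat features, then write out any deferred
 * superblock changes, refill the reserve pool, restart background
 * work and recover leftover CoW allocations.
 */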
1599static int
1600xfs_remount_rw(
1601	struct xfs_mount	*mp)
1602{
1603	struct xfs_sb		*sbp = &mp->m_sb;
1604	int error;
1605
1606	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1607		xfs_warn(mp,
1608			"ro->rw transition prohibited on norecovery mount");
1609		return -EINVAL;
1610	}
1611
1612	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1613	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1614		xfs_warn(mp,
1615	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1616			(sbp->sb_features_ro_compat &
1617				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1618		return -EINVAL;
1619	}
1620
1621	mp->m_flags &= ~XFS_MOUNT_RDONLY;
1622
1623	/*
1624	 * If this is the first remount to writeable state we might have some
1625	 * superblock changes to update.
1626	 */
1627	if (mp->m_update_sb) {
1628		error = xfs_sync_sb(mp, false);
1629		if (error) {
1630			xfs_warn(mp, "failed to write sb changes");
1631			return error;
1632		}
1633		mp->m_update_sb = false;
1634	}
1635
1636	/*
1637	 * Fill out the reserve pool if it is empty. Use the stashed value if
1638	 * it is non-zero, otherwise go with the default.
1639	 */
1640	xfs_restore_resvblks(mp);
1641	xfs_log_work_queue(mp);
1642
1643	/* Recover any CoW blocks that never got remapped. */
1644	error = xfs_reflink_recover_cow(mp);
1645	if (error) {
1646		xfs_err(mp,
1647			"Error %d recovering leftover CoW allocations.", error);
1648		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1649		return error;
1650	}
1651	xfs_start_block_reaping(mp);
1652
1653	/* Create the per-AG metadata reservation pool .*/
1654	error = xfs_fs_reserve_ag_blocks(mp);
1655	if (error && error != -ENOSPC)
1656		return error;
1657
1658	return 0;
1659}
1660
1661static int
1662xfs_remount_ro(
1663	struct xfs_mount	*mp)
1664{
1665	int error;
1666
1667	/*
1668	 * Cancel background eofb scanning so it cannot race with the final
1669	 * log force+buftarg wait and deadlock the remount.
1670	 */
1671	xfs_stop_block_reaping(mp);
1672
1673	/* Get rid of any leftover CoW reservations... */
1674	error = xfs_icache_free_cowblocks(mp, NULL);
1675	if (error) {
1676		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1677		return error;
1678	}
1679
1680	/* Free the per-AG metadata reservation pool. */
1681	error = xfs_fs_unreserve_ag_blocks(mp);
1682	if (error) {
1683		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1684		return error;
1685	}
1686
1687	/*
1688	 * Before we sync the metadata, we need to free up the reserve block
1689	 * pool so that the used block count in the superblock on disk is
1690 * correct at the end of the remount. Stash the current reserve pool
1691	 * size so that if we get remounted rw, we can return it to the same
1692	 * size.
1693	 */
1694	xfs_save_resvblks(mp);
1695
1696	xfs_quiesce_attr(mp);
1697	mp->m_flags |= XFS_MOUNT_RDONLY;
1698
1699	return 0;
1700}
1701
1702/*
1703 * Logically we would return an error here to prevent users from believing
1704 * they might have changed mount options using remount which can't be changed.
1705 *
1706 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1707 * arguments in some cases so we can't blindly reject options, but have to
1708 * check for each specified option if it actually differs from the currently
1709 * set option and only reject it if that's the case.
1710 *
1711 * Until that is implemented we return success for every remount request, and
1712 * silently ignore all options that we can't actually change.
1713 */
1714static int
1715xfs_fc_reconfigure(
1716	struct fs_context *fc)
1717{
1718	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1719	struct xfs_mount        *new_mp = fc->s_fs_info;
1720	xfs_sb_t		*sbp = &mp->m_sb;
1721	int			flags = fc->sb_flags;
1722	int			error;
1723
1724	/* version 5 superblocks always support version counters. */
1725	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1726		fc->sb_flags |= SB_I_VERSION;
1727
1728	error = xfs_fc_validate_params(new_mp);
1729	if (error)
1730		return error;
1731
1732	sync_filesystem(mp->m_super);
1733
1734	/* inode32 -> inode64 */
1735	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1736	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1737		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1738		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1739	}
1740
1741	/* inode64 -> inode32 */
1742	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1743	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1744		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1745		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1746	}
1747
1748	/* ro -> rw */
1749	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1750		error = xfs_remount_rw(mp);
1751		if (error)
1752			return error;
1753	}
1754
1755	/* rw -> ro */
1756	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1757		error = xfs_remount_ro(mp);
1758		if (error)
1759			return error;
1760	}
1761
1762	return 0;
1763}
1764
1765static void xfs_fc_free(
1766	struct fs_context	*fc)
1767{
1768	struct xfs_mount	*mp = fc->s_fs_info;
1769
1770	/*
1771	 * mp is stored in the fs_context when it is initialized.
1772	 * mp is transferred to the superblock on a successful mount,
1773	 * but if an error occurs before the transfer we have to free
1774	 * it here.
1775	 */
1776	if (mp)
1777		xfs_mount_free(mp);
1778}
1779
1780static const struct fs_context_operations xfs_context_ops = {
1781	.parse_param = xfs_fc_parse_param,
1782	.get_tree    = xfs_fc_get_tree,
1783	.reconfigure = xfs_fc_reconfigure,
1784	.free        = xfs_fc_free,
1785};
1786
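/*
 * Allocate and initialise the fs_context private data (the xfs_mount).
 * Defaults set here (logbufs, logbsize, allocsize) may be overridden
 * during option parsing; mp is transferred to the superblock on a
 * successful mount and freed by xfs_fc_free() otherwise.
 */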
1787static int xfs_init_fs_context(
1788	struct fs_context	*fc)
1789{
1790	struct xfs_mount	*mp;
1791
1792	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1793	if (!mp)
1794		return -ENOMEM;
1795
1796	spin_lock_init(&mp->m_sb_lock);
1797	spin_lock_init(&mp->m_agirotor_lock);
1798	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1799	spin_lock_init(&mp->m_perag_lock);
1800	mutex_init(&mp->m_growlock);
1801	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1802	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1803	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1804	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1805	mp->m_kobj.kobject.kset = xfs_kset;
1806	/*
1807	 * We don't create the finobt per-ag space reservation until after log
1808	 * recovery, so we must set this to true so that an ifree transaction
1809	 * started during log recovery will not depend on space reservations
1810	 * for finobt expansion.
1811	 */
1812	mp->m_finobt_nores = true;
1813
1814	/*
1815	 * These can be overridden by the mount option parsing.
1816	 */
1817	mp->m_logbufs = -1;
1818	mp->m_logbsize = -1;
1819	mp->m_allocsize_log = 16; /* 64k */
1820
1821	/*
1822	 * Copy binary VFS mount flags we are interested in.
1823	 */
1824	if (fc->sb_flags & SB_RDONLY)
1825		mp->m_flags |= XFS_MOUNT_RDONLY;
1826	if (fc->sb_flags & SB_DIRSYNC)
1827		mp->m_flags |= XFS_MOUNT_DIRSYNC;
1828	if (fc->sb_flags & SB_SYNCHRONOUS)
1829		mp->m_flags |= XFS_MOUNT_WSYNC;
1830
1831	fc->s_fs_info = mp;
1832	fc->ops = &xfs_context_ops;
1833
1834	return 0;
1835}
1836
1837static struct file_system_type xfs_fs_type = {
1838	.owner			= THIS_MODULE,
1839	.name			= "xfs",
1840	.init_fs_context	= xfs_init_fs_context,
1841	.parameters		= xfs_fs_parameters,
1842	.kill_sb		= kill_block_super,
1843	.fs_flags		= FS_REQUIRES_DEV,
1844};
1845MODULE_ALIAS_FS("xfs");
1846
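/*
 * Create the slab caches used by XFS.  Failure unwinds in strict
 * reverse order of creation through the chained error labels below.
 */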
1847STATIC int __init
1848xfs_init_zones(void)
1849{
1850	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1851						sizeof(struct xlog_ticket),
1852						0, 0, NULL);
1853	if (!xfs_log_ticket_zone)
1854		goto out;
1855
1856	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1857					sizeof(struct xfs_extent_free_item),
1858					0, 0, NULL);
1859	if (!xfs_bmap_free_item_zone)
1860		goto out_destroy_log_ticket_zone;
1861
1862	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1863					       sizeof(struct xfs_btree_cur),
1864					       0, 0, NULL);
1865	if (!xfs_btree_cur_zone)
1866		goto out_destroy_bmap_free_item_zone;
1867
1868	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1869					      sizeof(struct xfs_da_state),
1870					      0, 0, NULL);
1871	if (!xfs_da_state_zone)
1872		goto out_destroy_btree_cur_zone;
1873
1874	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1875					   sizeof(struct xfs_ifork),
1876					   0, 0, NULL);
1877	if (!xfs_ifork_zone)
1878		goto out_destroy_da_state_zone;
1879
1880	xfs_trans_zone = kmem_cache_create("xfs_trans",
1881					   sizeof(struct xfs_trans),
1882					   0, 0, NULL);
1883	if (!xfs_trans_zone)
1884		goto out_destroy_ifork_zone;
1885
1886
1887	/*
1888	 * The zone-allocated buf log item is the maximum size possible
1889	 * under XFS.  This wastes a little bit of memory, but it is much
1890	 * faster.
1891	 */
1892	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1893					      sizeof(struct xfs_buf_log_item),
1894					      0, 0, NULL);
1895	if (!xfs_buf_item_zone)
1896		goto out_destroy_trans_zone;
1897
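	/*
	 * The EFD/EFI (extent free done/intent) items are sized with room
	 * for XFS_EFD_MAX_FAST_EXTENTS / XFS_EFI_MAX_FAST_EXTENTS extents
	 * inline, so small extent counts need no separate array allocation.
	 */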
1898	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1899					(sizeof(struct xfs_efd_log_item) +
1900					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
1901					sizeof(struct xfs_extent)),
1902					0, 0, NULL);
1903	if (!xfs_efd_zone)
1904		goto out_destroy_buf_item_zone;
1905
1906	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1907					 (sizeof(struct xfs_efi_log_item) +
1908					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1909					 sizeof(struct xfs_extent)),
1910					 0, 0, NULL);
1911	if (!xfs_efi_zone)
1912		goto out_destroy_efd_zone;
1913
1914	xfs_inode_zone = kmem_cache_create("xfs_inode",
1915					   sizeof(struct xfs_inode), 0,
1916					   (SLAB_HWCACHE_ALIGN |
1917					    SLAB_RECLAIM_ACCOUNT |
1918					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1919					   xfs_fs_inode_init_once);
1920	if (!xfs_inode_zone)
1921		goto out_destroy_efi_zone;
1922
1923	xfs_ili_zone = kmem_cache_create("xfs_ili",
1924					 sizeof(struct xfs_inode_log_item), 0,
1925					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1926					 NULL);
1927	if (!xfs_ili_zone)
1928		goto out_destroy_inode_zone;
1929
1930	xfs_icreate_zone = kmem_cache_create("xfs_icr",
1931					     sizeof(struct xfs_icreate_item),
1932					     0, 0, NULL);
1933	if (!xfs_icreate_zone)
1934		goto out_destroy_ili_zone;
1935
1936	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1937					 sizeof(struct xfs_rud_log_item),
1938					 0, 0, NULL);
1939	if (!xfs_rud_zone)
1940		goto out_destroy_icreate_zone;
1941
1942	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
1943			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1944			0, 0, NULL);
1945	if (!xfs_rui_zone)
1946		goto out_destroy_rud_zone;
1947
1948	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
1949					 sizeof(struct xfs_cud_log_item),
1950					 0, 0, NULL);
1951	if (!xfs_cud_zone)
1952		goto out_destroy_rui_zone;
1953
1954	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
1955			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1956			0, 0, NULL);
1957	if (!xfs_cui_zone)
1958		goto out_destroy_cud_zone;
1959
1960	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
1961					 sizeof(struct xfs_bud_log_item),
1962					 0, 0, NULL);
1963	if (!xfs_bud_zone)
1964		goto out_destroy_cui_zone;
1965
1966	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
1967			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1968			0, 0, NULL);
1969	if (!xfs_bui_zone)
1970		goto out_destroy_bud_zone;
1971
1972	return 0;
1973
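	/* Error unwind: destroy the caches in reverse order of creation. */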
1974 out_destroy_bud_zone:
1975	kmem_cache_destroy(xfs_bud_zone);
1976 out_destroy_cui_zone:
1977	kmem_cache_destroy(xfs_cui_zone);
1978 out_destroy_cud_zone:
1979	kmem_cache_destroy(xfs_cud_zone);
1980 out_destroy_rui_zone:
1981	kmem_cache_destroy(xfs_rui_zone);
1982 out_destroy_rud_zone:
1983	kmem_cache_destroy(xfs_rud_zone);
1984 out_destroy_icreate_zone:
1985	kmem_cache_destroy(xfs_icreate_zone);
1986 out_destroy_ili_zone:
1987	kmem_cache_destroy(xfs_ili_zone);
1988 out_destroy_inode_zone:
1989	kmem_cache_destroy(xfs_inode_zone);
1990 out_destroy_efi_zone:
1991	kmem_cache_destroy(xfs_efi_zone);
1992 out_destroy_efd_zone:
1993	kmem_cache_destroy(xfs_efd_zone);
1994 out_destroy_buf_item_zone:
1995	kmem_cache_destroy(xfs_buf_item_zone);
1996 out_destroy_trans_zone:
1997	kmem_cache_destroy(xfs_trans_zone);
1998 out_destroy_ifork_zone:
1999	kmem_cache_destroy(xfs_ifork_zone);
2000 out_destroy_da_state_zone:
2001	kmem_cache_destroy(xfs_da_state_zone);
2002 out_destroy_btree_cur_zone:
2003	kmem_cache_destroy(xfs_btree_cur_zone);
2004 out_destroy_bmap_free_item_zone:
2005	kmem_cache_destroy(xfs_bmap_free_item_zone);
2006 out_destroy_log_ticket_zone:
2007	kmem_cache_destroy(xfs_log_ticket_zone);
2008 out:
2009	return -ENOMEM;
2010}
2011
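/*
 * Destroy the slab caches created by xfs_init_zones(), in reverse
 * order of creation.  Called on module unload and from the error path
 * of init_xfs_fs().
 */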
2012STATIC void
2013xfs_destroy_zones(void)
2014{
2015	/*
2016	 * Make sure all delayed RCU frees have completed before we
2017	 * destroy the caches.
2018	 */
2019	rcu_barrier();
2020	kmem_cache_destroy(xfs_bui_zone);
2021	kmem_cache_destroy(xfs_bud_zone);
2022	kmem_cache_destroy(xfs_cui_zone);
2023	kmem_cache_destroy(xfs_cud_zone);
2024	kmem_cache_destroy(xfs_rui_zone);
2025	kmem_cache_destroy(xfs_rud_zone);
2026	kmem_cache_destroy(xfs_icreate_zone);
2027	kmem_cache_destroy(xfs_ili_zone);
2028	kmem_cache_destroy(xfs_inode_zone);
2029	kmem_cache_destroy(xfs_efi_zone);
2030	kmem_cache_destroy(xfs_efd_zone);
2031	kmem_cache_destroy(xfs_buf_item_zone);
2032	kmem_cache_destroy(xfs_trans_zone);
2033	kmem_cache_destroy(xfs_ifork_zone);
2034	kmem_cache_destroy(xfs_da_state_zone);
2035	kmem_cache_destroy(xfs_btree_cur_zone);
2036	kmem_cache_destroy(xfs_bmap_free_item_zone);
2037	kmem_cache_destroy(xfs_log_ticket_zone);
2038}
2039
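/*
 * Create the global workqueues: xfs_alloc_wq offloads block allocation
 * work (see the comment below on memory reclaim), while xfs_discard_wq
 * drives asynchronous block discards.
 */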
2040STATIC int __init
2041xfs_init_workqueues(void)
2042{
2043	/*
2044	 * The allocation workqueue can be used in memory reclaim situations
2045	 * (writepage path), and parallelism is only limited by the number of
2046	 * AGs in all the filesystems mounted. Hence use the default large
2047	 * max_active value for this workqueue.
2048	 */
2049	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2050			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2051	if (!xfs_alloc_wq)
2052		return -ENOMEM;
2053
2054	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2055	if (!xfs_discard_wq)
2056		goto out_free_alloc_wq;
2057
2058	return 0;
2059out_free_alloc_wq:
2060	destroy_workqueue(xfs_alloc_wq);
2061	return -ENOMEM;
2062}
2063
2064STATIC void
2065xfs_destroy_workqueues(void)
2066{
2067	destroy_workqueue(xfs_discard_wq);
2068	destroy_workqueue(xfs_alloc_wq);
2069}
2070
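/*
 * Module init: bring up global state in dependency order (slab caches,
 * workqueues, the MRU cache used by the filestream allocator, the
 * buffer cache, procfs, sysctl, sysfs/stats, quota) and only then
 * register the filesystem type, so no mount can start before the
 * infrastructure it needs exists.  Each failure path unwinds whatever
 * has already been set up.
 */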
2071STATIC int __init
2072init_xfs_fs(void)
2073{
2074	int			error;
2075
2076	xfs_check_ondisk_structs();
2077
2078	printk(KERN_INFO XFS_VERSION_STRING " with "
2079			 XFS_BUILD_OPTIONS " enabled\n");
2080
2081	xfs_dir_startup();
2082
2083	error = xfs_init_zones();
2084	if (error)
2085		goto out;
2086
2087	error = xfs_init_workqueues();
2088	if (error)
2089		goto out_destroy_zones;
2090
2091	error = xfs_mru_cache_init();
2092	if (error)
2093		goto out_destroy_wq;
2094
2095	error = xfs_buf_init();
2096	if (error)
2097		goto out_mru_cache_uninit;
2098
2099	error = xfs_init_procfs();
2100	if (error)
2101		goto out_buf_terminate;
2102
2103	error = xfs_sysctl_register();
2104	if (error)
2105		goto out_cleanup_procfs;
2106
2107	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2108	if (!xfs_kset) {
2109		error = -ENOMEM;
2110		goto out_sysctl_unregister;
2111	}
2112
2113	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2114
2115	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2116	if (!xfsstats.xs_stats) {
2117		error = -ENOMEM;
2118		goto out_kset_unregister;
2119	}
2120
2121	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2122			       "stats");
2123	if (error)
2124		goto out_free_stats;
2125
2126#ifdef DEBUG
2127	xfs_dbg_kobj.kobject.kset = xfs_kset;
2128	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2129	if (error)
2130		goto out_remove_stats_kobj;
2131#endif
2132
2133	error = xfs_qm_init();
2134	if (error)
2135		goto out_remove_dbg_kobj;
2136
2137	error = register_filesystem(&xfs_fs_type);
2138	if (error)
2139		goto out_qm_exit;
2140	return 0;
2141
2142 out_qm_exit:
2143	xfs_qm_exit();
2144 out_remove_dbg_kobj:
2145#ifdef DEBUG
2146	xfs_sysfs_del(&xfs_dbg_kobj);
2147 out_remove_stats_kobj:
2148#endif
2149	xfs_sysfs_del(&xfsstats.xs_kobj);
2150 out_free_stats:
2151	free_percpu(xfsstats.xs_stats);
2152 out_kset_unregister:
2153	kset_unregister(xfs_kset);
2154 out_sysctl_unregister:
2155	xfs_sysctl_unregister();
2156 out_cleanup_procfs:
2157	xfs_cleanup_procfs();
2158 out_buf_terminate:
2159	xfs_buf_terminate();
2160 out_mru_cache_uninit:
2161	xfs_mru_cache_uninit();
2162 out_destroy_wq:
2163	xfs_destroy_workqueues();
2164 out_destroy_zones:
2165	xfs_destroy_zones();
2166 out:
2167	return error;
2168}
2169
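/*
 * Module exit: tear the global state down again, roughly in the
 * reverse order of init_xfs_fs(), unregistering the filesystem early
 * so no new mounts can start during teardown.
 */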
2170STATIC void __exit
2171exit_xfs_fs(void)
2172{
2173	xfs_qm_exit();
2174	unregister_filesystem(&xfs_fs_type);
2175#ifdef DEBUG
2176	xfs_sysfs_del(&xfs_dbg_kobj);
2177#endif
2178	xfs_sysfs_del(&xfsstats.xs_kobj);
2179	free_percpu(xfsstats.xs_stats);
2180	kset_unregister(xfs_kset);
2181	xfs_sysctl_unregister();
2182	xfs_cleanup_procfs();
2183	xfs_buf_terminate();
2184	xfs_mru_cache_uninit();
2185	xfs_destroy_workqueues();
2186	xfs_destroy_zones();
2187	xfs_uuid_table_free();
2188}
2189
2190module_init(init_xfs_fs);
2191module_exit(exit_xfs_fs);
2192
2193MODULE_AUTHOR("Silicon Graphics, Inc.");
2194MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2195MODULE_LICENSE("GPL");