v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/module.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/highmem.h>
  11#include <linux/time.h>
  12#include <linux/init.h>
  13#include <linux/seq_file.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/mount.h>
  17#include <linux/writeback.h>
  18#include <linux/statfs.h>
  19#include <linux/compat.h>
  20#include <linux/parser.h>
  21#include <linux/ctype.h>
  22#include <linux/namei.h>
  23#include <linux/miscdevice.h>
  24#include <linux/magic.h>
  25#include <linux/slab.h>
  26#include <linux/cleancache.h>
  27#include <linux/ratelimit.h>
  28#include <linux/crc32c.h>
  29#include <linux/btrfs.h>
  30#include "delayed-inode.h"
  31#include "ctree.h"
  32#include "disk-io.h"
  33#include "transaction.h"
  34#include "btrfs_inode.h"
  35#include "print-tree.h"
  36#include "props.h"
  37#include "xattr.h"
  38#include "volumes.h"
  39#include "export.h"
  40#include "compression.h"
  41#include "rcu-string.h"
  42#include "dev-replace.h"
  43#include "free-space-cache.h"
  44#include "backref.h"
  45#include "space-info.h"
  46#include "sysfs.h"
  47#include "tests/btrfs-tests.h"
  48#include "block-group.h"
  49
  50#include "qgroup.h"
  51#define CREATE_TRACE_POINTS
  52#include <trace/events/btrfs.h>
  53
  54static const struct super_operations btrfs_super_ops;
  55
  56/*
  57 * Types for mounting the default subvolume and a subvolume explicitly
  58 * requested by subvol=/path. That way the callchain is straightforward and we
  59 * don't have to play tricks with the mount options and recursive calls to
  60 * btrfs_mount.
  61 *
  62 * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
  63 */
  64static struct file_system_type btrfs_fs_type;
  65static struct file_system_type btrfs_root_fs_type;
  66
  67static int btrfs_remount(struct super_block *sb, int *flags, char *data);
  68
  69const char *btrfs_decode_error(int errno)
  70{
  71	char *errstr = "unknown";
  72
  73	switch (errno) {
  74	case -EIO:
  75		errstr = "IO failure";
  76		break;
  77	case -ENOMEM:
  78		errstr = "Out of memory";
  79		break;
  80	case -EROFS:
  81		errstr = "Readonly filesystem";
  82		break;
  83	case -EEXIST:
  84		errstr = "Object already exists";
  85		break;
  86	case -ENOSPC:
  87		errstr = "No space left";
  88		break;
  89	case -ENOENT:
  90		errstr = "No such entry";
  91		break;
  92	}
  93
  94	return errstr;
  95}
  96
  97/*
  98 * __btrfs_handle_fs_error decodes expected errors from the caller and
  99 * invokes the appropriate error response.
 100 */
 101__cold
 102void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
 103		       unsigned int line, int errno, const char *fmt, ...)
 104{
 105	struct super_block *sb = fs_info->sb;
 106#ifdef CONFIG_PRINTK
 107	const char *errstr;
 108#endif
 109
 110	/*
 111	 * Special case: if the error is EROFS, and we're already
 112	 * under SB_RDONLY, then it is safe here.
 113	 */
 114	if (errno == -EROFS && sb_rdonly(sb))
 115  		return;
 116
 117#ifdef CONFIG_PRINTK
 118	errstr = btrfs_decode_error(errno);
 119	if (fmt) {
 120		struct va_format vaf;
 121		va_list args;
 122
 123		va_start(args, fmt);
 124		vaf.fmt = fmt;
 125		vaf.va = &args;
 126
 127		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 128			sb->s_id, function, line, errno, errstr, &vaf);
 129		va_end(args);
 130	} else {
 131		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 132			sb->s_id, function, line, errno, errstr);
 133	}
 134#endif
 135
 136	/*
 137	 * Today we only save the error info to memory.  Long term we'll
 138	 * also send it down to the disk
 139	 */
 140	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 141
 142	/* Don't go through full error handling during mount */
 143	if (!(sb->s_flags & SB_BORN))
 144		return;
 145
 146	if (sb_rdonly(sb))
 147		return;
 148
 149	/* btrfs handles errors by forcing the filesystem readonly */
 150	sb->s_flags |= SB_RDONLY;
 151	btrfs_info(fs_info, "forced readonly");
 152	/*
 153	 * Note that a running device replace operation is not canceled here
 154	 * although there is no way to update the progress. It would add the
 155	 * risk of a deadlock, therefore the canceling is omitted. The only
 156	 * penalty is that some I/O remains active until the procedure
 157	 * completes. The next time when the filesystem is mounted writable
 158	 * again, the device replace operation continues.
 159	 */
 160}
 161
 162#ifdef CONFIG_PRINTK
 163static const char * const logtypes[] = {
 164	"emergency",
 165	"alert",
 166	"critical",
 167	"error",
 168	"warning",
 169	"notice",
 170	"info",
 171	"debug",
 172};
 173
 174
 175/*
 176 * Use one ratelimit state per log level so that a flood of less important
 177 * messages doesn't cause more important ones to be dropped.
 178 */
 179static struct ratelimit_state printk_limits[] = {
 180	RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
 181	RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
 182	RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
 183	RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
 184	RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
 185	RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
 186	RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
 187	RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
 188};
 189
 190void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 191{
 192	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
 193	struct va_format vaf;
 194	va_list args;
 195	int kern_level;
 196	const char *type = logtypes[4];
 197	struct ratelimit_state *ratelimit = &printk_limits[4];
 198
 199	va_start(args, fmt);
 200
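    	/*
    	 * Strip any leading KERN_<LEVEL> prefix from the format string: the
    	 * level digit selects both the human readable level name and the
    	 * matching ratelimit bucket. Without a prefix the "warning" defaults
    	 * chosen above are kept.
    	 */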
 201	while ((kern_level = printk_get_level(fmt)) != 0) {
 202		size_t size = printk_skip_level(fmt) - fmt;
 203
 204		if (kern_level >= '0' && kern_level <= '7') {
 205			memcpy(lvl, fmt,  size);
 206			lvl[size] = '\0';
 207			type = logtypes[kern_level - '0'];
 208			ratelimit = &printk_limits[kern_level - '0'];
 209		}
 210		fmt += size;
 211	}
 212
 213	vaf.fmt = fmt;
 214	vaf.va = &args;
 215
 216	if (__ratelimit(ratelimit))
 217		printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
 218			fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
 219
 220	va_end(args);
 221}
 222#endif
 223
 224/*
 225 * We only mark the transaction aborted and then set the file system read-only.
 226 * This will prevent new transactions from starting or trying to join this
 227 * one.
 228 *
 229 * This means that error recovery at the call site is limited to freeing
 230 * any local memory allocations and passing the error code up without
 231 * further cleanup. The transaction should complete as it normally would
 232 * in the call path but will return -EIO.
 233 *
 234 * We'll complete the cleanup in btrfs_end_transaction and
 235 * btrfs_commit_transaction.
 236 */
 237__cold
 238void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 239			       const char *function,
 240			       unsigned int line, int errno)
 241{
 242	struct btrfs_fs_info *fs_info = trans->fs_info;
 243
 244	trans->aborted = errno;
 245	/* Nothing used. The other threads that have joined this
 246	 * transaction may be able to continue. */
 247	if (!trans->dirty && list_empty(&trans->new_bgs)) {
 248		const char *errstr;
 249
 250		errstr = btrfs_decode_error(errno);
 251		btrfs_warn(fs_info,
 252		           "%s:%d: Aborting unused transaction(%s).",
 253		           function, line, errstr);
 254		return;
 255	}
 256	WRITE_ONCE(trans->transaction->aborted, errno);
 257	/* Wake up anybody who may be waiting on this transaction */
 258	wake_up(&fs_info->transaction_wait);
 259	wake_up(&fs_info->transaction_blocked_wait);
 260	__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
 261}
 262/*
 263 * __btrfs_panic decodes unexpected, fatal errors from the caller,
 264 * issues an alert, and either panics or BUGs, depending on mount options.
 265 */
 266__cold
 267void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 268		   unsigned int line, int errno, const char *fmt, ...)
 269{
 270	char *s_id = "<unknown>";
 271	const char *errstr;
 272	struct va_format vaf = { .fmt = fmt };
 273	va_list args;
 274
 275	if (fs_info)
 276		s_id = fs_info->sb->s_id;
 277
 278	va_start(args, fmt);
 279	vaf.va = &args;
 280
 281	errstr = btrfs_decode_error(errno);
 282	if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
 283		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
 284			s_id, function, line, &vaf, errno, errstr);
 285
 286	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
 287		   function, line, &vaf, errno, errstr);
 288	va_end(args);
 289	/* Caller calls BUG() */
 290}
 291
 292static void btrfs_put_super(struct super_block *sb)
 293{
 294	close_ctree(btrfs_sb(sb));
 295}
 296
 297enum {
 298	Opt_acl, Opt_noacl,
 299	Opt_clear_cache,
 300	Opt_commit_interval,
 301	Opt_compress,
 302	Opt_compress_force,
 303	Opt_compress_force_type,
 304	Opt_compress_type,
 305	Opt_degraded,
 306	Opt_device,
 307	Opt_fatal_errors,
 308	Opt_flushoncommit, Opt_noflushoncommit,
 309	Opt_inode_cache, Opt_noinode_cache,
 310	Opt_max_inline,
 311	Opt_barrier, Opt_nobarrier,
 312	Opt_datacow, Opt_nodatacow,
 313	Opt_datasum, Opt_nodatasum,
 314	Opt_defrag, Opt_nodefrag,
 315	Opt_discard, Opt_nodiscard,
 316	Opt_nologreplay,
 317	Opt_norecovery,
 318	Opt_ratio,
 319	Opt_rescan_uuid_tree,
 320	Opt_skip_balance,
 321	Opt_space_cache, Opt_no_space_cache,
 322	Opt_space_cache_version,
 323	Opt_ssd, Opt_nossd,
 324	Opt_ssd_spread, Opt_nossd_spread,
 325	Opt_subvol,
 326	Opt_subvol_empty,
 327	Opt_subvolid,
 328	Opt_thread_pool,
 329	Opt_treelog, Opt_notreelog,
 330	Opt_usebackuproot,
 331	Opt_user_subvol_rm_allowed,
 332
 333	/* Deprecated options */
 334	Opt_alloc_start,
 335	Opt_recovery,
 336	Opt_subvolrootid,
 337
 338	/* Debugging options */
 339	Opt_check_integrity,
 340	Opt_check_integrity_including_extent_data,
 341	Opt_check_integrity_print_mask,
 342	Opt_enospc_debug, Opt_noenospc_debug,
 343#ifdef CONFIG_BTRFS_DEBUG
 344	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
 345#endif
 346#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 347	Opt_ref_verify,
 348#endif
 349	Opt_err,
 350};
 351
 352static const match_table_t tokens = {
 353	{Opt_acl, "acl"},
 354	{Opt_noacl, "noacl"},
 355	{Opt_clear_cache, "clear_cache"},
 356	{Opt_commit_interval, "commit=%u"},
 357	{Opt_compress, "compress"},
 358	{Opt_compress_type, "compress=%s"},
 359	{Opt_compress_force, "compress-force"},
 360	{Opt_compress_force_type, "compress-force=%s"},
 361	{Opt_degraded, "degraded"},
 362	{Opt_device, "device=%s"},
 363	{Opt_fatal_errors, "fatal_errors=%s"},
 364	{Opt_flushoncommit, "flushoncommit"},
 365	{Opt_noflushoncommit, "noflushoncommit"},
 366	{Opt_inode_cache, "inode_cache"},
 367	{Opt_noinode_cache, "noinode_cache"},
 368	{Opt_max_inline, "max_inline=%s"},
 369	{Opt_barrier, "barrier"},
 370	{Opt_nobarrier, "nobarrier"},
 371	{Opt_datacow, "datacow"},
 372	{Opt_nodatacow, "nodatacow"},
 373	{Opt_datasum, "datasum"},
 374	{Opt_nodatasum, "nodatasum"},
 375	{Opt_defrag, "autodefrag"},
 376	{Opt_nodefrag, "noautodefrag"},
 377	{Opt_discard, "discard"},
 378	{Opt_nodiscard, "nodiscard"},
 379	{Opt_nologreplay, "nologreplay"},
 380	{Opt_norecovery, "norecovery"},
 381	{Opt_ratio, "metadata_ratio=%u"},
 382	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 383	{Opt_skip_balance, "skip_balance"},
 384	{Opt_space_cache, "space_cache"},
 385	{Opt_no_space_cache, "nospace_cache"},
 386	{Opt_space_cache_version, "space_cache=%s"},
 387	{Opt_ssd, "ssd"},
 388	{Opt_nossd, "nossd"},
 389	{Opt_ssd_spread, "ssd_spread"},
 390	{Opt_nossd_spread, "nossd_spread"},
 391	{Opt_subvol, "subvol=%s"},
 392	{Opt_subvol_empty, "subvol="},
 393	{Opt_subvolid, "subvolid=%s"},
 394	{Opt_thread_pool, "thread_pool=%u"},
 395	{Opt_treelog, "treelog"},
 396	{Opt_notreelog, "notreelog"},
 397	{Opt_usebackuproot, "usebackuproot"},
 398	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 399
 400	/* Deprecated options */
 401	{Opt_alloc_start, "alloc_start=%s"},
 402	{Opt_recovery, "recovery"},
 403	{Opt_subvolrootid, "subvolrootid=%d"},
 404
 405	/* Debugging options */
 406	{Opt_check_integrity, "check_int"},
 407	{Opt_check_integrity_including_extent_data, "check_int_data"},
 408	{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
 409	{Opt_enospc_debug, "enospc_debug"},
 410	{Opt_noenospc_debug, "noenospc_debug"},
 411#ifdef CONFIG_BTRFS_DEBUG
 412	{Opt_fragment_data, "fragment=data"},
 413	{Opt_fragment_metadata, "fragment=metadata"},
 414	{Opt_fragment_all, "fragment=all"},
 415#endif
 416#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 417	{Opt_ref_verify, "ref_verify"},
 418#endif
 419	{Opt_err, NULL},
 420};
 421
 422/*
 423 * Regular mount options parser.  Everything that is needed only when
 424 * reading in a new superblock is parsed here.
 425 * XXX JDM: This needs to be cleaned up for remount.
 426 */
 427int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 428			unsigned long new_flags)
 429{
 430	substring_t args[MAX_OPT_ARGS];
 431	char *p, *num;
 432	u64 cache_gen;
 433	int intarg;
 434	int ret = 0;
 435	char *compress_type;
 436	bool compress_force = false;
 437	enum btrfs_compression_type saved_compress_type;
 438	bool saved_compress_force;
 439	int no_compress = 0;
 440
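    	/*
    	 * Seed the defaults from what is already on disk: prefer the free
    	 * space tree if its compat_ro bit is set, otherwise fall back to the
    	 * v1 space cache when a cache generation is present.
    	 */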
 441	cache_gen = btrfs_super_cache_generation(info->super_copy);
 442	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
 443		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
 444	else if (cache_gen)
 445		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 446
 447	/*
  448	 * Even if the options are empty, we still need to do the extra
  449	 * check against the new flags.
 450	 */
 451	if (!options)
 452		goto check;
 453
 454	while ((p = strsep(&options, ",")) != NULL) {
 455		int token;
 456		if (!*p)
 457			continue;
 458
 459		token = match_token(p, tokens, args);
 460		switch (token) {
 461		case Opt_degraded:
 462			btrfs_info(info, "allowing degraded mounts");
 463			btrfs_set_opt(info->mount_opt, DEGRADED);
 464			break;
 465		case Opt_subvol:
 466		case Opt_subvol_empty:
 467		case Opt_subvolid:
 468		case Opt_subvolrootid:
 469		case Opt_device:
 470			/*
 471			 * These are parsed by btrfs_parse_subvol_options or
 472			 * btrfs_parse_device_options and can be ignored here.
 473			 */
 474			break;
 475		case Opt_nodatasum:
 476			btrfs_set_and_info(info, NODATASUM,
 477					   "setting nodatasum");
 478			break;
 479		case Opt_datasum:
 480			if (btrfs_test_opt(info, NODATASUM)) {
 481				if (btrfs_test_opt(info, NODATACOW))
 482					btrfs_info(info,
 483						   "setting datasum, datacow enabled");
 484				else
 485					btrfs_info(info, "setting datasum");
 486			}
 487			btrfs_clear_opt(info->mount_opt, NODATACOW);
 488			btrfs_clear_opt(info->mount_opt, NODATASUM);
 489			break;
 490		case Opt_nodatacow:
 491			if (!btrfs_test_opt(info, NODATACOW)) {
 492				if (!btrfs_test_opt(info, COMPRESS) ||
 493				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
 494					btrfs_info(info,
 495						   "setting nodatacow, compression disabled");
 496				} else {
 497					btrfs_info(info, "setting nodatacow");
 498				}
 499			}
 500			btrfs_clear_opt(info->mount_opt, COMPRESS);
 501			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 502			btrfs_set_opt(info->mount_opt, NODATACOW);
 503			btrfs_set_opt(info->mount_opt, NODATASUM);
 504			break;
 505		case Opt_datacow:
 506			btrfs_clear_and_info(info, NODATACOW,
 507					     "setting datacow");
 508			break;
 509		case Opt_compress_force:
 510		case Opt_compress_force_type:
 511			compress_force = true;
 512			/* Fallthrough */
 513		case Opt_compress:
 514		case Opt_compress_type:
 515			saved_compress_type = btrfs_test_opt(info,
 516							     COMPRESS) ?
 517				info->compress_type : BTRFS_COMPRESS_NONE;
 518			saved_compress_force =
 519				btrfs_test_opt(info, FORCE_COMPRESS);
 520			if (token == Opt_compress ||
 521			    token == Opt_compress_force ||
 522			    strncmp(args[0].from, "zlib", 4) == 0) {
 523				compress_type = "zlib";
 524
 525				info->compress_type = BTRFS_COMPRESS_ZLIB;
 526				info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
 527				/*
 528				 * args[0] contains uninitialized data since
 529				 * for these tokens we don't expect any
 530				 * parameter.
 531				 */
 532				if (token != Opt_compress &&
 533				    token != Opt_compress_force)
 534					info->compress_level =
 535					  btrfs_compress_str2level(
 536							BTRFS_COMPRESS_ZLIB,
 537							args[0].from + 4);
 538				btrfs_set_opt(info->mount_opt, COMPRESS);
 539				btrfs_clear_opt(info->mount_opt, NODATACOW);
 540				btrfs_clear_opt(info->mount_opt, NODATASUM);
 541				no_compress = 0;
 542			} else if (strncmp(args[0].from, "lzo", 3) == 0) {
 543				compress_type = "lzo";
 544				info->compress_type = BTRFS_COMPRESS_LZO;
 545				btrfs_set_opt(info->mount_opt, COMPRESS);
 546				btrfs_clear_opt(info->mount_opt, NODATACOW);
 547				btrfs_clear_opt(info->mount_opt, NODATASUM);
 548				btrfs_set_fs_incompat(info, COMPRESS_LZO);
 549				no_compress = 0;
 550			} else if (strncmp(args[0].from, "zstd", 4) == 0) {
 551				compress_type = "zstd";
 552				info->compress_type = BTRFS_COMPRESS_ZSTD;
 553				info->compress_level =
 554					btrfs_compress_str2level(
 555							 BTRFS_COMPRESS_ZSTD,
 556							 args[0].from + 4);
 557				btrfs_set_opt(info->mount_opt, COMPRESS);
 558				btrfs_clear_opt(info->mount_opt, NODATACOW);
 559				btrfs_clear_opt(info->mount_opt, NODATASUM);
 560				btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
 561				no_compress = 0;
 562			} else if (strncmp(args[0].from, "no", 2) == 0) {
 563				compress_type = "no";
 564				btrfs_clear_opt(info->mount_opt, COMPRESS);
 565				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 566				compress_force = false;
 567				no_compress++;
 568			} else {
 569				ret = -EINVAL;
 570				goto out;
 571			}
 572
 573			if (compress_force) {
 574				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
 575			} else {
 576				/*
 577				 * If we remount from compress-force=xxx to
 578				 * compress=xxx, we need to clear the FORCE_COMPRESS
 579				 * flag; otherwise, there is no way for users
 580				 * to disable forcible compression separately.
 581				 */
 582				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 583			}
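    			/*
    			 * Only print the informational message when the
    			 * effective compression settings actually changed, or
    			 * when compression was switched off for the first
    			 * time.
    			 */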
 584			if ((btrfs_test_opt(info, COMPRESS) &&
 585			     (info->compress_type != saved_compress_type ||
 586			      compress_force != saved_compress_force)) ||
 587			    (!btrfs_test_opt(info, COMPRESS) &&
 588			     no_compress == 1)) {
 589				btrfs_info(info, "%s %s compression, level %d",
 590					   (compress_force) ? "force" : "use",
 591					   compress_type, info->compress_level);
 592			}
 593			compress_force = false;
 594			break;
 595		case Opt_ssd:
 596			btrfs_set_and_info(info, SSD,
 597					   "enabling ssd optimizations");
 598			btrfs_clear_opt(info->mount_opt, NOSSD);
 599			break;
 600		case Opt_ssd_spread:
 601			btrfs_set_and_info(info, SSD,
 602					   "enabling ssd optimizations");
 603			btrfs_set_and_info(info, SSD_SPREAD,
 604					   "using spread ssd allocation scheme");
 605			btrfs_clear_opt(info->mount_opt, NOSSD);
 606			break;
 607		case Opt_nossd:
 608			btrfs_set_opt(info->mount_opt, NOSSD);
 609			btrfs_clear_and_info(info, SSD,
 610					     "not using ssd optimizations");
 611			/* Fallthrough */
 612		case Opt_nossd_spread:
 613			btrfs_clear_and_info(info, SSD_SPREAD,
 614					     "not using spread ssd allocation scheme");
 615			break;
 616		case Opt_barrier:
 617			btrfs_clear_and_info(info, NOBARRIER,
 618					     "turning on barriers");
 619			break;
 620		case Opt_nobarrier:
 621			btrfs_set_and_info(info, NOBARRIER,
 622					   "turning off barriers");
 623			break;
 624		case Opt_thread_pool:
 625			ret = match_int(&args[0], &intarg);
 626			if (ret) {
 627				goto out;
 628			} else if (intarg == 0) {
 629				ret = -EINVAL;
 630				goto out;
 631			}
 632			info->thread_pool_size = intarg;
 633			break;
 634		case Opt_max_inline:
 635			num = match_strdup(&args[0]);
 636			if (num) {
 637				info->max_inline = memparse(num, NULL);
 638				kfree(num);
 639
 640				if (info->max_inline) {
 641					info->max_inline = min_t(u64,
 642						info->max_inline,
 643						info->sectorsize);
 644				}
 645				btrfs_info(info, "max_inline at %llu",
 646					   info->max_inline);
 647			} else {
 648				ret = -ENOMEM;
 649				goto out;
 650			}
 651			break;
 652		case Opt_alloc_start:
 653			btrfs_info(info,
 654				"option alloc_start is obsolete, ignored");
 655			break;
 656		case Opt_acl:
 657#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 658			info->sb->s_flags |= SB_POSIXACL;
 659			break;
 660#else
 661			btrfs_err(info, "support for ACL not compiled in!");
 662			ret = -EINVAL;
 663			goto out;
 664#endif
 665		case Opt_noacl:
 666			info->sb->s_flags &= ~SB_POSIXACL;
 667			break;
 668		case Opt_notreelog:
 669			btrfs_set_and_info(info, NOTREELOG,
 670					   "disabling tree log");
 671			break;
 672		case Opt_treelog:
 673			btrfs_clear_and_info(info, NOTREELOG,
 674					     "enabling tree log");
 675			break;
 676		case Opt_norecovery:
 677		case Opt_nologreplay:
 678			btrfs_set_and_info(info, NOLOGREPLAY,
 679					   "disabling log replay at mount time");
 680			break;
 681		case Opt_flushoncommit:
 682			btrfs_set_and_info(info, FLUSHONCOMMIT,
 683					   "turning on flush-on-commit");
 684			break;
 685		case Opt_noflushoncommit:
 686			btrfs_clear_and_info(info, FLUSHONCOMMIT,
 687					     "turning off flush-on-commit");
 688			break;
 689		case Opt_ratio:
 690			ret = match_int(&args[0], &intarg);
 691			if (ret)
 692				goto out;
 693			info->metadata_ratio = intarg;
 694			btrfs_info(info, "metadata ratio %u",
 695				   info->metadata_ratio);
 696			break;
 697		case Opt_discard:
 698			btrfs_set_and_info(info, DISCARD,
 699					   "turning on discard");
 700			break;
 701		case Opt_nodiscard:
 702			btrfs_clear_and_info(info, DISCARD,
 703					     "turning off discard");
 704			break;
 705		case Opt_space_cache:
 706		case Opt_space_cache_version:
 707			if (token == Opt_space_cache ||
 708			    strcmp(args[0].from, "v1") == 0) {
 709				btrfs_clear_opt(info->mount_opt,
 710						FREE_SPACE_TREE);
 711				btrfs_set_and_info(info, SPACE_CACHE,
 712					   "enabling disk space caching");
 713			} else if (strcmp(args[0].from, "v2") == 0) {
 714				btrfs_clear_opt(info->mount_opt,
 715						SPACE_CACHE);
 716				btrfs_set_and_info(info, FREE_SPACE_TREE,
 717						   "enabling free space tree");
 718			} else {
 719				ret = -EINVAL;
 720				goto out;
 721			}
 722			break;
 723		case Opt_rescan_uuid_tree:
 724			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 725			break;
 726		case Opt_no_space_cache:
 727			if (btrfs_test_opt(info, SPACE_CACHE)) {
 728				btrfs_clear_and_info(info, SPACE_CACHE,
 729					     "disabling disk space caching");
 730			}
 731			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
 732				btrfs_clear_and_info(info, FREE_SPACE_TREE,
 733					     "disabling free space tree");
 734			}
 735			break;
 736		case Opt_inode_cache:
 737			btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
 738					   "enabling inode map caching");
 739			break;
 740		case Opt_noinode_cache:
 741			btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
 742					     "disabling inode map caching");
 743			break;
 744		case Opt_clear_cache:
 745			btrfs_set_and_info(info, CLEAR_CACHE,
 746					   "force clearing of disk cache");
 747			break;
 748		case Opt_user_subvol_rm_allowed:
 749			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 750			break;
 751		case Opt_enospc_debug:
 752			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
 753			break;
 754		case Opt_noenospc_debug:
 755			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
 756			break;
 757		case Opt_defrag:
 758			btrfs_set_and_info(info, AUTO_DEFRAG,
 759					   "enabling auto defrag");
 760			break;
 761		case Opt_nodefrag:
 762			btrfs_clear_and_info(info, AUTO_DEFRAG,
 763					     "disabling auto defrag");
 764			break;
 765		case Opt_recovery:
 766			btrfs_warn(info,
 767				   "'recovery' is deprecated, use 'usebackuproot' instead");
 768			/* fall through */
 769		case Opt_usebackuproot:
 770			btrfs_info(info,
 771				   "trying to use backup root at mount time");
 772			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 773			break;
 774		case Opt_skip_balance:
 775			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
 776			break;
 777#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 778		case Opt_check_integrity_including_extent_data:
 779			btrfs_info(info,
 780				   "enabling check integrity including extent data");
 781			btrfs_set_opt(info->mount_opt,
 782				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
 783			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 784			break;
 785		case Opt_check_integrity:
 786			btrfs_info(info, "enabling check integrity");
 787			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 788			break;
 789		case Opt_check_integrity_print_mask:
 790			ret = match_int(&args[0], &intarg);
 791			if (ret)
 792				goto out;
 793			info->check_integrity_print_mask = intarg;
 794			btrfs_info(info, "check_integrity_print_mask 0x%x",
 795				   info->check_integrity_print_mask);
 796			break;
 797#else
 798		case Opt_check_integrity_including_extent_data:
 799		case Opt_check_integrity:
 800		case Opt_check_integrity_print_mask:
 801			btrfs_err(info,
 802				  "support for check_integrity* not compiled in!");
 803			ret = -EINVAL;
 804			goto out;
 805#endif
 806		case Opt_fatal_errors:
 807			if (strcmp(args[0].from, "panic") == 0)
 808				btrfs_set_opt(info->mount_opt,
 809					      PANIC_ON_FATAL_ERROR);
 810			else if (strcmp(args[0].from, "bug") == 0)
 811				btrfs_clear_opt(info->mount_opt,
 812					      PANIC_ON_FATAL_ERROR);
 813			else {
 814				ret = -EINVAL;
 815				goto out;
 816			}
 817			break;
 818		case Opt_commit_interval:
 819			intarg = 0;
 820			ret = match_int(&args[0], &intarg);
 821			if (ret)
 822				goto out;
 823			if (intarg == 0) {
 824				btrfs_info(info,
 825					   "using default commit interval %us",
 826					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 827				intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
 828			} else if (intarg > 300) {
 829				btrfs_warn(info, "excessive commit interval %d",
 830					   intarg);
 831			}
 832			info->commit_interval = intarg;
 833			break;
 834#ifdef CONFIG_BTRFS_DEBUG
 835		case Opt_fragment_all:
 836			btrfs_info(info, "fragmenting all space");
 837			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 838			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
 839			break;
 840		case Opt_fragment_metadata:
 841			btrfs_info(info, "fragmenting metadata");
 842			btrfs_set_opt(info->mount_opt,
 843				      FRAGMENT_METADATA);
 844			break;
 845		case Opt_fragment_data:
 846			btrfs_info(info, "fragmenting data");
 847			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 848			break;
 849#endif
 850#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 851		case Opt_ref_verify:
 852			btrfs_info(info, "doing ref verification");
 853			btrfs_set_opt(info->mount_opt, REF_VERIFY);
 854			break;
 855#endif
 856		case Opt_err:
 857			btrfs_info(info, "unrecognized mount option '%s'", p);
 858			ret = -EINVAL;
 859			goto out;
 860		default:
 861			break;
 862		}
 863	}
 864check:
 865	/*
 866	 * Extra check for current option against current flag
 867	 */
 868	if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
 869		btrfs_err(info,
 870			  "nologreplay must be used with ro mount option");
 871		ret = -EINVAL;
 872	}
 873out:
 874	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
 875	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
 876	    !btrfs_test_opt(info, CLEAR_CACHE)) {
 877		btrfs_err(info, "cannot disable free space tree");
 878		ret = -EINVAL;
 879
 880	}
 881	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
 882		btrfs_info(info, "disk space caching is enabled");
 883	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
 884		btrfs_info(info, "using free space tree");
 885	return ret;
 886}
 887
 888/*
 889 * Parse mount options that are required early in the mount process.
 890 *
 891 * All other options will be parsed much later in the mount process and
 892 * only when we need to allocate a new super block.
 893 */
 894static int btrfs_parse_device_options(const char *options, fmode_t flags,
 895				      void *holder)
 896{
 897	substring_t args[MAX_OPT_ARGS];
 898	char *device_name, *opts, *orig, *p;
 899	struct btrfs_device *device = NULL;
 900	int error = 0;
 901
 902	lockdep_assert_held(&uuid_mutex);
 903
 904	if (!options)
 905		return 0;
 906
 907	/*
 908	 * strsep changes the string, duplicate it because btrfs_parse_options
 909	 * gets called later
 910	 */
 911	opts = kstrdup(options, GFP_KERNEL);
 912	if (!opts)
 913		return -ENOMEM;
 914	orig = opts;
 915
 916	while ((p = strsep(&opts, ",")) != NULL) {
 917		int token;
 918
 919		if (!*p)
 920			continue;
 921
 922		token = match_token(p, tokens, args);
 923		if (token == Opt_device) {
 924			device_name = match_strdup(&args[0]);
 925			if (!device_name) {
 926				error = -ENOMEM;
 927				goto out;
 928			}
 929			device = btrfs_scan_one_device(device_name, flags,
 930					holder);
 931			kfree(device_name);
 932			if (IS_ERR(device)) {
 933				error = PTR_ERR(device);
 934				goto out;
 935			}
 936		}
 937	}
 938
 939out:
 940	kfree(orig);
 941	return error;
 942}
 943
 944/*
 945 * Parse mount options that are related to subvolume id
 946 *
 947 * The value is later passed to mount_subvol()
 948 */
 949static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
 950		u64 *subvol_objectid)
 951{
 952	substring_t args[MAX_OPT_ARGS];
 953	char *opts, *orig, *p;
 954	int error = 0;
 955	u64 subvolid;
 956
 957	if (!options)
 958		return 0;
 959
 960	/*
 961	 * strsep changes the string, duplicate it because
 962	 * btrfs_parse_device_options gets called later
 963	 */
 964	opts = kstrdup(options, GFP_KERNEL);
 965	if (!opts)
 966		return -ENOMEM;
 967	orig = opts;
 968
 969	while ((p = strsep(&opts, ",")) != NULL) {
 970		int token;
 971		if (!*p)
 972			continue;
 973
 974		token = match_token(p, tokens, args);
 975		switch (token) {
 976		case Opt_subvol:
 977			kfree(*subvol_name);
 978			*subvol_name = match_strdup(&args[0]);
 979			if (!*subvol_name) {
 980				error = -ENOMEM;
 981				goto out;
 982			}
 983			break;
 984		case Opt_subvolid:
 985			error = match_u64(&args[0], &subvolid);
 986			if (error)
 987				goto out;
 988
 989			/* we want the original fs_tree */
 990			if (subvolid == 0)
 991				subvolid = BTRFS_FS_TREE_OBJECTID;
 992
 993			*subvol_objectid = subvolid;
 994			break;
 995		case Opt_subvolrootid:
 996			pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
 997			break;
 998		default:
 999			break;
1000		}
1001	}
1002
1003out:
1004	kfree(orig);
1005	return error;
1006}
1007
1008static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1009					   u64 subvol_objectid)
1010{
1011	struct btrfs_root *root = fs_info->tree_root;
1012	struct btrfs_root *fs_root;
1013	struct btrfs_root_ref *root_ref;
1014	struct btrfs_inode_ref *inode_ref;
1015	struct btrfs_key key;
1016	struct btrfs_path *path = NULL;
1017	char *name = NULL, *ptr;
1018	u64 dirid;
1019	int len;
1020	int ret;
1021
1022	path = btrfs_alloc_path();
1023	if (!path) {
1024		ret = -ENOMEM;
1025		goto err;
1026	}
1027	path->leave_spinning = 1;
1028
1029	name = kmalloc(PATH_MAX, GFP_KERNEL);
1030	if (!name) {
1031		ret = -ENOMEM;
1032		goto err;
1033	}
1034	ptr = name + PATH_MAX - 1;
1035	ptr[0] = '\0';
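    	/*
    	 * The path is assembled backwards from the end of the buffer while
    	 * walking towards the top-level subvolume; the result is moved to
    	 * the front of the buffer (or "/" is returned) once the walk is
    	 * done.
    	 */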
1036
1037	/*
1038	 * Walk up the subvolume trees in the tree of tree roots by root
1039	 * backrefs until we hit the top-level subvolume.
1040	 */
1041	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1042		key.objectid = subvol_objectid;
1043		key.type = BTRFS_ROOT_BACKREF_KEY;
1044		key.offset = (u64)-1;
1045
1046		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1047		if (ret < 0) {
1048			goto err;
1049		} else if (ret > 0) {
1050			ret = btrfs_previous_item(root, path, subvol_objectid,
1051						  BTRFS_ROOT_BACKREF_KEY);
1052			if (ret < 0) {
1053				goto err;
1054			} else if (ret > 0) {
1055				ret = -ENOENT;
1056				goto err;
1057			}
1058		}
1059
1060		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1061		subvol_objectid = key.offset;
1062
1063		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1064					  struct btrfs_root_ref);
1065		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1066		ptr -= len + 1;
1067		if (ptr < name) {
1068			ret = -ENAMETOOLONG;
1069			goto err;
1070		}
1071		read_extent_buffer(path->nodes[0], ptr + 1,
1072				   (unsigned long)(root_ref + 1), len);
1073		ptr[0] = '/';
1074		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1075		btrfs_release_path(path);
1076
1077		key.objectid = subvol_objectid;
1078		key.type = BTRFS_ROOT_ITEM_KEY;
1079		key.offset = (u64)-1;
1080		fs_root = btrfs_read_fs_root_no_name(fs_info, &key);
1081		if (IS_ERR(fs_root)) {
1082			ret = PTR_ERR(fs_root);
1083			goto err;
1084		}
1085
1086		/*
1087		 * Walk up the filesystem tree by inode refs until we hit the
1088		 * root directory.
1089		 */
1090		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1091			key.objectid = dirid;
1092			key.type = BTRFS_INODE_REF_KEY;
1093			key.offset = (u64)-1;
1094
1095			ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1096			if (ret < 0) {
1097				goto err;
1098			} else if (ret > 0) {
1099				ret = btrfs_previous_item(fs_root, path, dirid,
1100							  BTRFS_INODE_REF_KEY);
1101				if (ret < 0) {
1102					goto err;
1103				} else if (ret > 0) {
1104					ret = -ENOENT;
1105					goto err;
1106				}
1107			}
1108
1109			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1110			dirid = key.offset;
1111
1112			inode_ref = btrfs_item_ptr(path->nodes[0],
1113						   path->slots[0],
1114						   struct btrfs_inode_ref);
1115			len = btrfs_inode_ref_name_len(path->nodes[0],
1116						       inode_ref);
1117			ptr -= len + 1;
1118			if (ptr < name) {
1119				ret = -ENAMETOOLONG;
1120				goto err;
1121			}
1122			read_extent_buffer(path->nodes[0], ptr + 1,
1123					   (unsigned long)(inode_ref + 1), len);
1124			ptr[0] = '/';
1125			btrfs_release_path(path);
1126		}
1127	}
1128
1129	btrfs_free_path(path);
1130	if (ptr == name + PATH_MAX - 1) {
1131		name[0] = '/';
1132		name[1] = '\0';
1133	} else {
1134		memmove(name, ptr, name + PATH_MAX - ptr);
1135	}
1136	return name;
1137
1138err:
1139	btrfs_free_path(path);
1140	kfree(name);
1141	return ERR_PTR(ret);
1142}
1143
1144static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1145{
1146	struct btrfs_root *root = fs_info->tree_root;
1147	struct btrfs_dir_item *di;
1148	struct btrfs_path *path;
1149	struct btrfs_key location;
1150	u64 dir_id;
1151
1152	path = btrfs_alloc_path();
1153	if (!path)
1154		return -ENOMEM;
1155	path->leave_spinning = 1;
1156
1157	/*
1158	 * Find the "default" dir item which points to the root item that we
1159	 * will mount by default if we haven't been given a specific subvolume
1160	 * to mount.
1161	 */
1162	dir_id = btrfs_super_root_dir(fs_info->super_copy);
1163	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1164	if (IS_ERR(di)) {
1165		btrfs_free_path(path);
1166		return PTR_ERR(di);
1167	}
1168	if (!di) {
1169		/*
1170		 * Ok the default dir item isn't there.  This is weird since
1171		 * it's always been there, but don't freak out, just try and
1172		 * mount the top-level subvolume.
1173		 */
1174		btrfs_free_path(path);
1175		*objectid = BTRFS_FS_TREE_OBJECTID;
1176		return 0;
1177	}
1178
1179	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1180	btrfs_free_path(path);
1181	*objectid = location.objectid;
1182	return 0;
1183}
1184
1185static int btrfs_fill_super(struct super_block *sb,
1186			    struct btrfs_fs_devices *fs_devices,
1187			    void *data)
1188{
1189	struct inode *inode;
1190	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1191	struct btrfs_key key;
1192	int err;
1193
1194	sb->s_maxbytes = MAX_LFS_FILESIZE;
1195	sb->s_magic = BTRFS_SUPER_MAGIC;
1196	sb->s_op = &btrfs_super_ops;
1197	sb->s_d_op = &btrfs_dentry_operations;
1198	sb->s_export_op = &btrfs_export_ops;
1199	sb->s_xattr = btrfs_xattr_handlers;
1200	sb->s_time_gran = 1;
1201#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1202	sb->s_flags |= SB_POSIXACL;
1203#endif
1204	sb->s_flags |= SB_I_VERSION;
1205	sb->s_iflags |= SB_I_CGROUPWB;
1206
1207	err = super_setup_bdi(sb);
1208	if (err) {
1209		btrfs_err(fs_info, "super_setup_bdi failed");
1210		return err;
1211	}
1212
1213	err = open_ctree(sb, fs_devices, (char *)data);
1214	if (err) {
1215		btrfs_err(fs_info, "open_ctree failed");
1216		return err;
1217	}
1218
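    	/*
    	 * Look up the root directory inode (BTRFS_FIRST_FREE_OBJECTID) of
    	 * the fs root opened by open_ctree() so the root dentry can be
    	 * created from it below.
    	 */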
1219	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
1220	key.type = BTRFS_INODE_ITEM_KEY;
1221	key.offset = 0;
1222	inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
1223	if (IS_ERR(inode)) {
1224		err = PTR_ERR(inode);
1225		goto fail_close;
1226	}
1227
1228	sb->s_root = d_make_root(inode);
1229	if (!sb->s_root) {
1230		err = -ENOMEM;
1231		goto fail_close;
1232	}
1233
1234	cleancache_init_fs(sb);
1235	sb->s_flags |= SB_ACTIVE;
1236	return 0;
1237
1238fail_close:
1239	close_ctree(fs_info);
1240	return err;
1241}
1242
1243int btrfs_sync_fs(struct super_block *sb, int wait)
1244{
1245	struct btrfs_trans_handle *trans;
1246	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1247	struct btrfs_root *root = fs_info->tree_root;
1248
1249	trace_btrfs_sync_fs(fs_info, wait);
1250
1251	if (!wait) {
1252		filemap_flush(fs_info->btree_inode->i_mapping);
1253		return 0;
1254	}
1255
1256	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1257
1258	trans = btrfs_attach_transaction_barrier(root);
1259	if (IS_ERR(trans)) {
1260		/* no transaction, don't bother */
1261		if (PTR_ERR(trans) == -ENOENT) {
1262			/*
1263			 * Exit unless we have some pending changes
1264			 * that need to go through commit
1265			 */
1266			if (fs_info->pending_changes == 0)
1267				return 0;
1268			/*
1269			 * A non-blocking test if the fs is frozen. We must not
1270			 * start a new transaction here otherwise a deadlock
1271			 * happens. The pending operations are delayed to the
1272			 * next commit after thawing.
1273			 */
1274			if (sb_start_write_trylock(sb))
1275				sb_end_write(sb);
1276			else
1277				return 0;
1278			trans = btrfs_start_transaction(root, 0);
1279		}
1280		if (IS_ERR(trans))
1281			return PTR_ERR(trans);
1282	}
1283	return btrfs_commit_transaction(trans);
1284}
1285
1286static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1287{
1288	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1289	const char *compress_type;
1290
1291	if (btrfs_test_opt(info, DEGRADED))
1292		seq_puts(seq, ",degraded");
1293	if (btrfs_test_opt(info, NODATASUM))
1294		seq_puts(seq, ",nodatasum");
1295	if (btrfs_test_opt(info, NODATACOW))
1296		seq_puts(seq, ",nodatacow");
1297	if (btrfs_test_opt(info, NOBARRIER))
1298		seq_puts(seq, ",nobarrier");
1299	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1300		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1301	if (info->thread_pool_size !=  min_t(unsigned long,
1302					     num_online_cpus() + 2, 8))
1303		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1304	if (btrfs_test_opt(info, COMPRESS)) {
1305		compress_type = btrfs_compress_type2str(info->compress_type);
1306		if (btrfs_test_opt(info, FORCE_COMPRESS))
1307			seq_printf(seq, ",compress-force=%s", compress_type);
1308		else
1309			seq_printf(seq, ",compress=%s", compress_type);
1310		if (info->compress_level)
1311			seq_printf(seq, ":%d", info->compress_level);
1312	}
1313	if (btrfs_test_opt(info, NOSSD))
1314		seq_puts(seq, ",nossd");
1315	if (btrfs_test_opt(info, SSD_SPREAD))
1316		seq_puts(seq, ",ssd_spread");
1317	else if (btrfs_test_opt(info, SSD))
1318		seq_puts(seq, ",ssd");
1319	if (btrfs_test_opt(info, NOTREELOG))
1320		seq_puts(seq, ",notreelog");
1321	if (btrfs_test_opt(info, NOLOGREPLAY))
1322		seq_puts(seq, ",nologreplay");
1323	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1324		seq_puts(seq, ",flushoncommit");
1325	if (btrfs_test_opt(info, DISCARD))
1326		seq_puts(seq, ",discard");
1327	if (!(info->sb->s_flags & SB_POSIXACL))
1328		seq_puts(seq, ",noacl");
1329	if (btrfs_test_opt(info, SPACE_CACHE))
1330		seq_puts(seq, ",space_cache");
1331	else if (btrfs_test_opt(info, FREE_SPACE_TREE))
1332		seq_puts(seq, ",space_cache=v2");
1333	else
1334		seq_puts(seq, ",nospace_cache");
1335	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1336		seq_puts(seq, ",rescan_uuid_tree");
1337	if (btrfs_test_opt(info, CLEAR_CACHE))
1338		seq_puts(seq, ",clear_cache");
1339	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1340		seq_puts(seq, ",user_subvol_rm_allowed");
1341	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1342		seq_puts(seq, ",enospc_debug");
1343	if (btrfs_test_opt(info, AUTO_DEFRAG))
1344		seq_puts(seq, ",autodefrag");
1345	if (btrfs_test_opt(info, INODE_MAP_CACHE))
1346		seq_puts(seq, ",inode_cache");
1347	if (btrfs_test_opt(info, SKIP_BALANCE))
1348		seq_puts(seq, ",skip_balance");
1349#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1350	if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
1351		seq_puts(seq, ",check_int_data");
1352	else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1353		seq_puts(seq, ",check_int");
1354	if (info->check_integrity_print_mask)
1355		seq_printf(seq, ",check_int_print_mask=%d",
1356				info->check_integrity_print_mask);
1357#endif
1358	if (info->metadata_ratio)
1359		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1360	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1361		seq_puts(seq, ",fatal_errors=panic");
1362	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1363		seq_printf(seq, ",commit=%u", info->commit_interval);
1364#ifdef CONFIG_BTRFS_DEBUG
1365	if (btrfs_test_opt(info, FRAGMENT_DATA))
1366		seq_puts(seq, ",fragment=data");
1367	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1368		seq_puts(seq, ",fragment=metadata");
1369#endif
1370	if (btrfs_test_opt(info, REF_VERIFY))
1371		seq_puts(seq, ",ref_verify");
1372	seq_printf(seq, ",subvolid=%llu",
1373		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1374	seq_puts(seq, ",subvol=");
1375	seq_dentry(seq, dentry, " \t\n\\");
1376	return 0;
1377}
1378
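    /*
     * Callbacks for sget(): superblocks are matched purely by the
     * btrfs_fs_devices they belong to, so every mount of the same filesystem
     * ends up sharing one superblock.
     */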
1379static int btrfs_test_super(struct super_block *s, void *data)
1380{
1381	struct btrfs_fs_info *p = data;
1382	struct btrfs_fs_info *fs_info = btrfs_sb(s);
1383
1384	return fs_info->fs_devices == p->fs_devices;
1385}
1386
1387static int btrfs_set_super(struct super_block *s, void *data)
1388{
1389	int err = set_anon_super(s, data);
1390	if (!err)
1391		s->s_fs_info = data;
1392	return err;
1393}
1394
1395/*
1396 * subvolumes are identified by ino 256
1397 */
1398static inline int is_subvolume_inode(struct inode *inode)
1399{
1400	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1401		return 1;
1402	return 0;
1403}
1404
1405static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1406				   struct vfsmount *mnt)
1407{
1408	struct dentry *root;
1409	int ret;
1410
1411	if (!subvol_name) {
1412		if (!subvol_objectid) {
1413			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1414							  &subvol_objectid);
1415			if (ret) {
1416				root = ERR_PTR(ret);
1417				goto out;
1418			}
1419		}
1420		subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
1421							    subvol_objectid);
1422		if (IS_ERR(subvol_name)) {
1423			root = ERR_CAST(subvol_name);
1424			subvol_name = NULL;
1425			goto out;
1426		}
1427
1428	}
1429
1430	root = mount_subtree(mnt, subvol_name);
1431	/* mount_subtree() drops our reference on the vfsmount. */
1432	mnt = NULL;
1433
1434	if (!IS_ERR(root)) {
1435		struct super_block *s = root->d_sb;
1436		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1437		struct inode *root_inode = d_inode(root);
1438		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1439
1440		ret = 0;
1441		if (!is_subvolume_inode(root_inode)) {
1442			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1443			       subvol_name);
1444			ret = -EINVAL;
1445		}
1446		if (subvol_objectid && root_objectid != subvol_objectid) {
1447			/*
1448			 * This will also catch a race condition where a
1449			 * subvolume which was passed by ID is renamed and
1450			 * another subvolume is renamed over the old location.
1451			 */
1452			btrfs_err(fs_info,
1453				  "subvol '%s' does not match subvolid %llu",
1454				  subvol_name, subvol_objectid);
1455			ret = -EINVAL;
1456		}
1457		if (ret) {
1458			dput(root);
1459			root = ERR_PTR(ret);
1460			deactivate_locked_super(s);
1461		}
1462	}
1463
1464out:
1465	mntput(mnt);
1466	kfree(subvol_name);
1467	return root;
1468}
1469
1470/*
1471 * Find a superblock for the given device / mount point.
1472 *
1473 * Note: This is based on mount_bdev from fs/super.c with a few additions
1474 *       for multiple device setup.  Make sure to keep it in sync.
1475 */
1476static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1477		int flags, const char *device_name, void *data)
1478{
1479	struct block_device *bdev = NULL;
1480	struct super_block *s;
1481	struct btrfs_device *device = NULL;
1482	struct btrfs_fs_devices *fs_devices = NULL;
1483	struct btrfs_fs_info *fs_info = NULL;
1484	void *new_sec_opts = NULL;
1485	fmode_t mode = FMODE_READ;
1486	int error = 0;
1487
1488	if (!(flags & SB_RDONLY))
1489		mode |= FMODE_WRITE;
1490
1491	if (data) {
1492		error = security_sb_eat_lsm_opts(data, &new_sec_opts);
1493		if (error)
1494			return ERR_PTR(error);
1495	}
1496
1497	/*
1498	 * Setup a dummy root and fs_info for test/set super.  This is because
1499	 * we don't actually fill this stuff out until open_ctree, but we need
1500	 * it for searching for existing supers, so this lets us do that and
1501	 * then open_ctree will properly initialize everything later.
1502	 */
1503	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1504	if (!fs_info) {
1505		error = -ENOMEM;
1506		goto error_sec_opts;
1507	}
1508
1509	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1510	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1511	if (!fs_info->super_copy || !fs_info->super_for_commit) {
1512		error = -ENOMEM;
1513		goto error_fs_info;
1514	}
1515
1516	mutex_lock(&uuid_mutex);
1517	error = btrfs_parse_device_options(data, mode, fs_type);
1518	if (error) {
1519		mutex_unlock(&uuid_mutex);
1520		goto error_fs_info;
1521	}
1522
1523	device = btrfs_scan_one_device(device_name, mode, fs_type);
1524	if (IS_ERR(device)) {
1525		mutex_unlock(&uuid_mutex);
1526		error = PTR_ERR(device);
1527		goto error_fs_info;
1528	}
1529
1530	fs_devices = device->fs_devices;
1531	fs_info->fs_devices = fs_devices;
1532
1533	error = btrfs_open_devices(fs_devices, mode, fs_type);
1534	mutex_unlock(&uuid_mutex);
1535	if (error)
1536		goto error_fs_info;
1537
1538	if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1539		error = -EACCES;
1540		goto error_close_devices;
1541	}
1542
1543	bdev = fs_devices->latest_bdev;
1544	s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1545		 fs_info);
1546	if (IS_ERR(s)) {
1547		error = PTR_ERR(s);
1548		goto error_close_devices;
1549	}
1550
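    	/*
    	 * sget() either found an existing superblock for these fs_devices or
    	 * set up a new one. In the existing case the devices opened above
    	 * and the temporary fs_info are dropped again, and a read-only vs
    	 * read-write mismatch with the existing super is reported as -EBUSY.
    	 */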
1551	if (s->s_root) {
1552		btrfs_close_devices(fs_devices);
1553		free_fs_info(fs_info);
1554		if ((flags ^ s->s_flags) & SB_RDONLY)
1555			error = -EBUSY;
1556	} else {
1557		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1558		btrfs_sb(s)->bdev_holder = fs_type;
1559		if (!strstr(crc32c_impl(), "generic"))
1560			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1561		error = btrfs_fill_super(s, fs_devices, data);
1562	}
1563	if (!error)
1564		error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
1565	security_free_mnt_opts(&new_sec_opts);
1566	if (error) {
1567		deactivate_locked_super(s);
1568		return ERR_PTR(error);
1569	}
1570
1571	return dget(s->s_root);
1572
1573error_close_devices:
1574	btrfs_close_devices(fs_devices);
1575error_fs_info:
1576	free_fs_info(fs_info);
1577error_sec_opts:
1578	security_free_mnt_opts(&new_sec_opts);
1579	return ERR_PTR(error);
1580}
1581
1582/*
1583 * Mount function which is called by VFS layer.
1584 *
1585 * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
1586 * which needs vfsmount* of device's root (/).  This means device's root has to
1587 * be mounted internally in any case.
1588 *
1589 * Operation flow:
1590 *   1. Parse subvol id related options for later use in mount_subvol().
1591 *
1592 *   2. Mount device's root (/) by calling vfs_kern_mount().
1593 *
1594 *      NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1595 *      first place. In order to avoid calling btrfs_mount() again, we use
1596 *      different file_system_type which is not registered to VFS by
1597 *      register_filesystem() (btrfs_root_fs_type). As a result,
1598 *      btrfs_mount_root() is called. The return value will be used by
1599 *      mount_subtree() in mount_subvol().
1600 *
1601 *   3. Call mount_subvol() to get the dentry of the subvolume. Since
1602 *      "btrfs subvolume set-default" exists, mount_subvol() is always called.
1603 */
1604static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1605		const char *device_name, void *data)
1606{
1607	struct vfsmount *mnt_root;
1608	struct dentry *root;
1609	char *subvol_name = NULL;
1610	u64 subvol_objectid = 0;
1611	int error = 0;
1612
1613	error = btrfs_parse_subvol_options(data, &subvol_name,
1614					&subvol_objectid);
1615	if (error) {
1616		kfree(subvol_name);
1617		return ERR_PTR(error);
1618	}
1619
1620	/* mount device's root (/) */
1621	mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
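    	/*
    	 * -EBUSY here means a superblock for this filesystem already exists
    	 * but with the opposite read-only state. Mount again with the flag
    	 * flipped so we attach to it; if read-write was requested, the
    	 * existing super is then remounted read-write.
    	 */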
1622	if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1623		if (flags & SB_RDONLY) {
1624			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1625				flags & ~SB_RDONLY, device_name, data);
1626		} else {
1627			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1628				flags | SB_RDONLY, device_name, data);
1629			if (IS_ERR(mnt_root)) {
1630				root = ERR_CAST(mnt_root);
1631				kfree(subvol_name);
1632				goto out;
1633			}
1634
1635			down_write(&mnt_root->mnt_sb->s_umount);
1636			error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1637			up_write(&mnt_root->mnt_sb->s_umount);
1638			if (error < 0) {
1639				root = ERR_PTR(error);
1640				mntput(mnt_root);
1641				kfree(subvol_name);
1642				goto out;
1643			}
1644		}
1645	}
1646	if (IS_ERR(mnt_root)) {
1647		root = ERR_CAST(mnt_root);
1648		kfree(subvol_name);
1649		goto out;
1650	}
1651
1652	/* mount_subvol() will free subvol_name and mnt_root */
1653	root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
1654
1655out:
1656	return root;
1657}
1658
1659static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1660				     u32 new_pool_size, u32 old_pool_size)
1661{
1662	if (new_pool_size == old_pool_size)
1663		return;
1664
1665	fs_info->thread_pool_size = new_pool_size;
1666
1667	btrfs_info(fs_info, "resize thread pool %d -> %d",
1668	       old_pool_size, new_pool_size);
1669
1670	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1671	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1672	btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
1673	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1674	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
1675	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
1676	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
1677				new_pool_size);
1678	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1679	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1680	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1681	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
1682	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
1683				new_pool_size);
1684}
1685
1686static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
1687{
1688	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1689}
1690
1691static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1692				       unsigned long old_opts, int flags)
1693{
1694	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1695	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1696	     (flags & SB_RDONLY))) {
1697		/* wait for any defraggers to finish */
1698		wait_event(fs_info->transaction_wait,
1699			   (atomic_read(&fs_info->defrag_running) == 0));
1700		if (flags & SB_RDONLY)
1701			sync_filesystem(fs_info->sb);
1702	}
1703}
1704
1705static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1706					 unsigned long old_opts)
1707{
1708	/*
1709	 * We need to clean up all defraggable inodes if autodefrag has been
1710	 * turned off or the filesystem is now read-only.
1711	 */
1712	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1713	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1714		btrfs_cleanup_defrag_inodes(fs_info);
1715	}
1716
1717	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1718}
1719
1720static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1721{
1722	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1723	struct btrfs_root *root = fs_info->tree_root;
1724	unsigned old_flags = sb->s_flags;
1725	unsigned long old_opts = fs_info->mount_opt;
1726	unsigned long old_compress_type = fs_info->compress_type;
1727	u64 old_max_inline = fs_info->max_inline;
1728	u32 old_thread_pool_size = fs_info->thread_pool_size;
1729	u32 old_metadata_ratio = fs_info->metadata_ratio;
1730	int ret;
1731
1732	sync_filesystem(sb);
1733	btrfs_remount_prepare(fs_info);
1734
1735	if (data) {
1736		void *new_sec_opts = NULL;
1737
1738		ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
1739		if (!ret)
1740			ret = security_sb_remount(sb, new_sec_opts);
1741		security_free_mnt_opts(&new_sec_opts);
1742		if (ret)
1743			goto restore;
1744	}
1745
1746	ret = btrfs_parse_options(fs_info, data, *flags);
1747	if (ret)
1748		goto restore;
1749
1750	btrfs_remount_begin(fs_info, old_opts, *flags);
1751	btrfs_resize_thread_pool(fs_info,
1752		fs_info->thread_pool_size, old_thread_pool_size);
1753
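    	/*
    	 * If the read-only state is not changing, the option and thread pool
    	 * updates above are all that is needed.
    	 */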
1754	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1755		goto out;
1756
1757	if (*flags & SB_RDONLY) {
1758		/*
1759		 * this also happens on 'umount -rf' or on shutdown, when
1760		 * the filesystem is busy.
1761		 */
1762		cancel_work_sync(&fs_info->async_reclaim_work);
1763
1764		/* wait for the uuid_scan task to finish */
1765		down(&fs_info->uuid_tree_rescan_sem);
1766		/* avoid complaints from lockdep et al. */
1767		up(&fs_info->uuid_tree_rescan_sem);
1768
1769		sb->s_flags |= SB_RDONLY;
1770
1771		/*
1772		 * Setting SB_RDONLY will put the cleaner thread to
1773		 * sleep at the next loop if it's already active.
1774		 * If it's already asleep, we'll leave unused block
1775		 * groups on disk until we're mounted read-write again
1776		 * unless we clean them up here.
1777		 */
1778		btrfs_delete_unused_bgs(fs_info);
1779
1780		btrfs_dev_replace_suspend_for_unmount(fs_info);
1781		btrfs_scrub_cancel(fs_info);
1782		btrfs_pause_balance(fs_info);
1783
1784		ret = btrfs_commit_super(fs_info);
1785		if (ret)
1786			goto restore;
1787	} else {
1788		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1789			btrfs_err(fs_info,
1790				"Remounting read-write after error is not allowed");
1791			ret = -EINVAL;
1792			goto restore;
1793		}
1794		if (fs_info->fs_devices->rw_devices == 0) {
1795			ret = -EACCES;
1796			goto restore;
1797		}
1798
1799		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1800			btrfs_warn(fs_info,
1801		"too many missing devices, writable remount is not allowed");
1802			ret = -EACCES;
1803			goto restore;
1804		}
1805
1806		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1807			ret = -EINVAL;
1808			goto restore;
1809		}
1810
1811		ret = btrfs_cleanup_fs_roots(fs_info);
1812		if (ret)
1813			goto restore;
1814
1815		/* recover relocation */
1816		mutex_lock(&fs_info->cleaner_mutex);
1817		ret = btrfs_recover_relocation(root);
1818		mutex_unlock(&fs_info->cleaner_mutex);
1819		if (ret)
1820			goto restore;
1821
1822		ret = btrfs_resume_balance_async(fs_info);
1823		if (ret)
1824			goto restore;
1825
1826		ret = btrfs_resume_dev_replace_async(fs_info);
1827		if (ret) {
1828			btrfs_warn(fs_info, "failed to resume dev_replace");
1829			goto restore;
1830		}
1831
1832		btrfs_qgroup_rescan_resume(fs_info);
1833
1834		if (!fs_info->uuid_root) {
1835			btrfs_info(fs_info, "creating UUID tree");
1836			ret = btrfs_create_uuid_tree(fs_info);
1837			if (ret) {
1838				btrfs_warn(fs_info,
1839					   "failed to create the UUID tree %d",
1840					   ret);
1841				goto restore;
1842			}
1843		}
1844		sb->s_flags &= ~SB_RDONLY;
1845
1846		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1847	}
1848out:
1849	wake_up_process(fs_info->transaction_kthread);
1850	btrfs_remount_cleanup(fs_info, old_opts);
1851	return 0;
1852
1853restore:
1854	/* We've hit an error - don't reset SB_RDONLY */
1855	if (sb_rdonly(sb))
1856		old_flags |= SB_RDONLY;
1857	sb->s_flags = old_flags;
1858	fs_info->mount_opt = old_opts;
1859	fs_info->compress_type = old_compress_type;
1860	fs_info->max_inline = old_max_inline;
1861	btrfs_resize_thread_pool(fs_info,
1862		old_thread_pool_size, fs_info->thread_pool_size);
1863	fs_info->metadata_ratio = old_metadata_ratio;
1864	btrfs_remount_cleanup(fs_info, old_opts);
1865	return ret;
1866}
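
/*
 * Illustrative sketch (not part of the kernel source): btrfs_remount() above
 * snapshots every tunable it may touch (mount_opt, compress_type, max_inline,
 * thread_pool_size, metadata_ratio) before parsing the new options, and the
 * "restore:" label rolls all of them back if any later step fails.  A minimal
 * stand-alone model of that save/rollback pattern, with hypothetical names:
 */
struct opts {
	unsigned long flags;
	unsigned int thread_pool;
};

/* Hypothetical stand-in for btrfs_parse_options(): mutates first, then fails */
static int apply_new_options(struct opts *o, unsigned long flags,
			     unsigned int pool)
{
	o->flags = flags;
	o->thread_pool = pool;
	return (pool == 0) ? -1 : 0;	/* reject an obviously bad value */
}

static int remount_like(struct opts *o, unsigned long flags, unsigned int pool)
{
	struct opts saved = *o;		/* snapshot before touching anything */
	int ret;

	ret = apply_new_options(o, flags, pool);
	if (ret)
		goto restore;
	return 0;

restore:
	*o = saved;			/* roll back to the snapshot on error */
	return ret;
}

int main(void)
{
	struct opts o = { .flags = 0x1, .thread_pool = 8 };

	/* a rejected request must leave the original options untouched */
	return !(remount_like(&o, 0x3, 0) != 0 && o.thread_pool == 8);
}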
1867
1868/* Used to sort the devices by max_avail (descending sort) */
1869static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
1870				       const void *dev_info2)
1871{
1872	if (((struct btrfs_device_info *)dev_info1)->max_avail >
1873	    ((struct btrfs_device_info *)dev_info2)->max_avail)
1874		return -1;
1875	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
1876		 ((struct btrfs_device_info *)dev_info2)->max_avail)
1877		return 1;
1878	else
1879		return 0;
1880}
1881
1882/*
1883 * Sort the devices by max_avail, which stores the max free extent size of
1884 * each device (descending sort).
1885 */
1886static inline void btrfs_descending_sort_devices(
1887					struct btrfs_device_info *devices,
1888					size_t nr_devices)
1889{
1890	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
1891	     btrfs_cmp_device_free_bytes, NULL);
1892}
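
/*
 * Illustrative sketch (not part of the kernel source): the comparator above
 * returns -1/1/0 so that sort() orders devices by max_avail, largest first.
 * The same comparator shape works with userspace qsort(); everything below is
 * hypothetical demo code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct dev_info {
	uint64_t max_avail;
};

static int cmp_desc(const void *a, const void *b)
{
	const struct dev_info *da = a, *db = b;

	if (da->max_avail > db->max_avail)
		return -1;
	if (da->max_avail < db->max_avail)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_info devs[] = { { 5 }, { 12 }, { 7 } };

	qsort(devs, 3, sizeof(devs[0]), cmp_desc);
	for (int i = 0; i < 3; i++)	/* prints "12 7 5": descending */
		printf("%llu ", (unsigned long long)devs[i].max_avail);
	printf("\n");
	return 0;
}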
1893
1894/*
1895 * Helper to calculate the free space on the devices that can be used to
1896 * store file data.
1897 */
1898static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1899					      u64 *free_bytes)
1900{
1901	struct btrfs_device_info *devices_info;
1902	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1903	struct btrfs_device *device;
1904	u64 type;
1905	u64 avail_space;
1906	u64 min_stripe_size;
1907	int num_stripes = 1;
1908	int i = 0, nr_devices;
1909	const struct btrfs_raid_attr *rattr;
1910
1911	/*
1912	 * We aren't under the device list lock, so this is racy-ish, but good
1913	 * enough for our purposes.
1914	 */
1915	nr_devices = fs_info->fs_devices->open_devices;
1916	if (!nr_devices) {
1917		smp_mb();
1918		nr_devices = fs_info->fs_devices->open_devices;
1919		ASSERT(nr_devices);
1920		if (!nr_devices) {
1921			*free_bytes = 0;
1922			return 0;
1923		}
1924	}
1925
1926	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1927			       GFP_KERNEL);
1928	if (!devices_info)
1929		return -ENOMEM;
1930
1931	/* calculate the minimum number of stripes for data space allocation */
1932	type = btrfs_data_alloc_profile(fs_info);
1933	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
1934
1935	if (type & BTRFS_BLOCK_GROUP_RAID0)
1936		num_stripes = nr_devices;
1937	else if (type & BTRFS_BLOCK_GROUP_RAID1)
1938		num_stripes = 2;
1939	else if (type & BTRFS_BLOCK_GROUP_RAID10)
1940		num_stripes = 4;
1941
1942	/* Adjust for more than 1 stripe per device */
1943	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
1944
1945	rcu_read_lock();
1946	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1947		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1948						&device->dev_state) ||
1949		    !device->bdev ||
1950		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
1951			continue;
1952
1953		if (i >= nr_devices)
1954			break;
1955
1956		avail_space = device->total_bytes - device->bytes_used;
1957
1958		/* align with stripe_len */
1959		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
1960
1961		/*
1962		 * In order to avoid overwriting the superblock on the drive,
1963		 * btrfs starts at an offset of at least 1MB when doing chunk
1964		 * allocation.
1965		 *
1966		 * This ensures we have at least min_stripe_size free space
1967		 * after excluding 1MB.
1968		 */
1969		if (avail_space <= SZ_1M + min_stripe_size)
1970			continue;
1971
1972		avail_space -= SZ_1M;
1973
1974		devices_info[i].dev = device;
1975		devices_info[i].max_avail = avail_space;
1976
1977		i++;
1978	}
1979	rcu_read_unlock();
1980
1981	nr_devices = i;
1982
1983	btrfs_descending_sort_devices(devices_info, nr_devices);
1984
1985	i = nr_devices - 1;
1986	avail_space = 0;
1987	while (nr_devices >= rattr->devs_min) {
1988		num_stripes = min(num_stripes, nr_devices);
1989
1990		if (devices_info[i].max_avail >= min_stripe_size) {
1991			int j;
1992			u64 alloc_size;
1993
1994			avail_space += devices_info[i].max_avail * num_stripes;
1995			alloc_size = devices_info[i].max_avail;
1996			for (j = i + 1 - num_stripes; j <= i; j++)
1997				devices_info[j].max_avail -= alloc_size;
1998		}
1999		i--;
2000		nr_devices--;
2001	}
2002
2003	kfree(devices_info);
2004	*free_bytes = avail_space;
2005	return 0;
2006}
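
/*
 * Illustrative sketch (not part of the kernel source): a stand-alone model of
 * the greedy loop above so the arithmetic is easy to follow.  With two devices
 * holding 10 GiB and 6 GiB of unallocated space and a two-stripe profile such
 * as RAID1, the loop counts 6 GiB * 2 = 12 GiB of raw space; btrfs_statfs()
 * later divides by the profile factor, so roughly 6 GiB shows up as available.
 * All names below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t simulate_raw_avail(uint64_t *avail_desc, int nr,
				   int num_stripes, int devs_min,
				   uint64_t min_stripe)
{
	uint64_t total = 0;
	int i = nr - 1;			/* start from the smallest device */

	while (nr >= devs_min) {
		int stripes = num_stripes < nr ? num_stripes : nr;

		if (avail_desc[i] >= min_stripe) {
			uint64_t alloc = avail_desc[i];

			total += alloc * stripes;
			/* drain the same amount from every device used */
			for (int j = i + 1 - stripes; j <= i; j++)
				avail_desc[j] -= alloc;
		}
		i--;
		nr--;
	}
	return total;
}

int main(void)
{
	const uint64_t gib = 1024ULL * 1024 * 1024;
	uint64_t devs[] = { 10 * gib, 6 * gib };	/* sorted descending */

	/* RAID1-like: 2 stripes, at least 2 devices, tiny min stripe size */
	printf("raw avail: %llu GiB\n", (unsigned long long)
	       (simulate_raw_avail(devs, 2, 2, 2, 1) / gib));
	return 0;
}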
2007
2008/*
2009 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2010 *
2011 * If there's a redundant raid level at DATA block groups, use the respective
2012 * multiplier to scale the sizes.
2013 *
2014 * Unused device space usage is based on simulating the chunk allocator
2015 * algorithm that respects the device sizes and order of allocations.  This is
2016 * a close approximation of the actual use but there are other factors that may
2017 * change the result (like a new metadata chunk).
2018 *
2019 * If metadata is exhausted, f_bavail will be 0.
2020 */
2021static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2022{
2023	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2024	struct btrfs_super_block *disk_super = fs_info->super_copy;
2025	struct list_head *head = &fs_info->space_info;
2026	struct btrfs_space_info *found;
2027	u64 total_used = 0;
2028	u64 total_free_data = 0;
2029	u64 total_free_meta = 0;
2030	int bits = dentry->d_sb->s_blocksize_bits;
2031	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
2032	unsigned factor = 1;
2033	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2034	int ret;
2035	u64 thresh = 0;
2036	int mixed = 0;
2037
2038	rcu_read_lock();
2039	list_for_each_entry_rcu(found, head, list) {
2040		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2041			int i;
2042
2043			total_free_data += found->disk_total - found->disk_used;
2044			total_free_data -=
2045				btrfs_account_ro_block_groups_free_space(found);
2046
2047			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2048				if (!list_empty(&found->block_groups[i]))
2049					factor = btrfs_bg_type_to_factor(
2050						btrfs_raid_array[i].bg_flag);
2051			}
2052		}
2053
2054		/*
2055	 * Metadata in mixed block group profiles is accounted in data
2056		 */
2057		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2058			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2059				mixed = 1;
2060			else
2061				total_free_meta += found->disk_total -
2062					found->disk_used;
2063		}
2064
2065		total_used += found->disk_used;
2066	}
2067
2068	rcu_read_unlock();
2069
2070	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2071	buf->f_blocks >>= bits;
2072	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2073
2074	/* Account global block reserve as used, it's in logical size already */
2075	spin_lock(&block_rsv->lock);
2076	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
2077	if (buf->f_bfree >= block_rsv->size >> bits)
2078		buf->f_bfree -= block_rsv->size >> bits;
2079	else
2080		buf->f_bfree = 0;
2081	spin_unlock(&block_rsv->lock);
2082
2083	buf->f_bavail = div_u64(total_free_data, factor);
2084	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2085	if (ret)
2086		return ret;
2087	buf->f_bavail += div_u64(total_free_data, factor);
2088	buf->f_bavail = buf->f_bavail >> bits;
2089
2090	/*
2091	 * We calculate the remaining metadata space minus global reserve. If
2092	 * this is (supposedly) smaller than zero, there's no space. But this
2093	 * does not hold in practice; the exhausted state happens while there is
2094	 * still some positive delta. So we apply some guesswork and compare the
2095	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
2096	 *
2097	 * We probably cannot calculate the exact threshold value because this
2098	 * depends on the internal reservations requested by various
2099	 * operations, so some operations that consume a small amount of metadata
2100	 * will succeed even if Avail is zero. But this is better than the other
2101	 * way around.
2102	 */
2103	thresh = SZ_4M;
2104
2105	if (!mixed && total_free_meta - thresh < block_rsv->size)
2106		buf->f_bavail = 0;
2107
2108	buf->f_type = BTRFS_SUPER_MAGIC;
2109	buf->f_bsize = dentry->d_sb->s_blocksize;
2110	buf->f_namelen = BTRFS_NAME_LEN;
2111
2112	/* We treat it as constant endianness (it doesn't matter _which_)
2113	   because we want the fsid to come out the same whether mounted
2114	   on a big-endian or little-endian host */
2115	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2116	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2117	/* Mask in the root object ID too, to disambiguate subvols */
2118	buf->f_fsid.val[0] ^=
2119		BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
2120	buf->f_fsid.val[1] ^=
2121		BTRFS_I(d_inode(dentry))->root->root_key.objectid;
2122
2123	return 0;
2124}
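
/*
 * Illustrative sketch (not part of the kernel source): btrfs_statfs() above
 * folds the 16-byte filesystem UUID into the two 32-bit f_fsid words and then
 * XORs in the subvolume's root objectid, so each subvolume reports a distinct
 * but stable fsid.  A stand-alone model with hypothetical names, using
 * ntohl() in place of be32_to_cpu():
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void fold_fsid(const uint8_t uuid[16], uint64_t root_objectid,
		      uint32_t out[2])
{
	uint32_t w[4];

	memcpy(w, uuid, sizeof(w));	/* four big-endian 32-bit words */
	out[0] = ntohl(w[0]) ^ ntohl(w[2]);
	out[1] = ntohl(w[1]) ^ ntohl(w[3]);
	out[0] ^= (uint32_t)(root_objectid >> 32);
	out[1] ^= (uint32_t)root_objectid;
}

int main(void)
{
	const uint8_t uuid[16] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde,
				   0xf0, 0x0f, 0xed, 0xcb, 0xa9, 0x87, 0x65,
				   0x43, 0x21 };
	uint32_t fsid[2];

	fold_fsid(uuid, 5, fsid);	/* 5 is the top-level subvolume id */
	printf("f_fsid = %08x:%08x\n", fsid[0], fsid[1]);
	return 0;
}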
2125
2126static void btrfs_kill_super(struct super_block *sb)
2127{
2128	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2129	kill_anon_super(sb);
2130	free_fs_info(fs_info);
2131}
2132
2133static struct file_system_type btrfs_fs_type = {
2134	.owner		= THIS_MODULE,
2135	.name		= "btrfs",
2136	.mount		= btrfs_mount,
2137	.kill_sb	= btrfs_kill_super,
2138	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2139};
2140
2141static struct file_system_type btrfs_root_fs_type = {
2142	.owner		= THIS_MODULE,
2143	.name		= "btrfs",
2144	.mount		= btrfs_mount_root,
2145	.kill_sb	= btrfs_kill_super,
2146	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2147};
2148
2149MODULE_ALIAS_FS("btrfs");
2150
2151static int btrfs_control_open(struct inode *inode, struct file *file)
2152{
2153	/*
2154	 * The control file's private_data is used to hold the
2155	 * transaction when it is started and is used to keep
2156	 * track of whether a transaction is already in progress.
2157	 */
2158	file->private_data = NULL;
2159	return 0;
2160}
2161
2162/*
2163 * used by btrfsctl to scan devices when no FS is mounted
2164 */
2165static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2166				unsigned long arg)
2167{
2168	struct btrfs_ioctl_vol_args *vol;
2169	struct btrfs_device *device = NULL;
2170	int ret = -ENOTTY;
2171
2172	if (!capable(CAP_SYS_ADMIN))
2173		return -EPERM;
2174
2175	vol = memdup_user((void __user *)arg, sizeof(*vol));
2176	if (IS_ERR(vol))
2177		return PTR_ERR(vol);
2178	vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2179
2180	switch (cmd) {
2181	case BTRFS_IOC_SCAN_DEV:
2182		mutex_lock(&uuid_mutex);
2183		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2184					       &btrfs_root_fs_type);
2185		ret = PTR_ERR_OR_ZERO(device);
2186		mutex_unlock(&uuid_mutex);
2187		break;
2188	case BTRFS_IOC_FORGET_DEV:
2189		ret = btrfs_forget_devices(vol->name);
2190		break;
2191	case BTRFS_IOC_DEVICES_READY:
2192		mutex_lock(&uuid_mutex);
2193		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2194					       &btrfs_root_fs_type);
2195		if (IS_ERR(device)) {
2196			mutex_unlock(&uuid_mutex);
2197			ret = PTR_ERR(device);
2198			break;
2199		}
2200		ret = !(device->fs_devices->num_devices ==
2201			device->fs_devices->total_devices);
2202		mutex_unlock(&uuid_mutex);
2203		break;
2204	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2205		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2206		break;
2207	}
2208
2209	kfree(vol);
2210	return ret;
2211}
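
/*
 * Illustrative sketch (not part of the kernel source): the ioctl handler above
 * copies a fixed-size struct from userspace with memdup_user() and then forces
 * a NUL at vol->name[BTRFS_PATH_NAME_MAX], because the caller may have filled
 * the name field without terminating it.  The same defensive pattern in
 * stand-alone C, with hypothetical names and a deliberately tiny buffer:
 */
#include <stdio.h>
#include <string.h>

#define DEMO_PATH_NAME_MAX 7

struct demo_vol_args {
	char name[DEMO_PATH_NAME_MAX + 1];	/* room for the forced NUL */
};

static void handle_untrusted(struct demo_vol_args *vol)
{
	vol->name[DEMO_PATH_NAME_MAX] = '\0';	/* never trust the terminator */
	printf("device name: '%s'\n", vol->name);
}

int main(void)
{
	struct demo_vol_args vol;

	/* simulate a caller that filled every byte and sent no terminator */
	memset(vol.name, 'A', sizeof(vol.name));
	handle_untrusted(&vol);			/* prints at most 7 'A's */
	return 0;
}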
2212
2213static int btrfs_freeze(struct super_block *sb)
2214{
2215	struct btrfs_trans_handle *trans;
2216	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2217	struct btrfs_root *root = fs_info->tree_root;
2218
2219	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2220	/*
2221	 * We don't need a barrier here, we'll wait for any transaction that
2222	 * could be in progress on other threads (and do delayed iputs that
2223	 * we want to avoid on a frozen filesystem), or do the commit
2224	 * ourselves.
2225	 */
2226	trans = btrfs_attach_transaction_barrier(root);
2227	if (IS_ERR(trans)) {
2228		/* no transaction, don't bother */
2229		if (PTR_ERR(trans) == -ENOENT)
2230			return 0;
2231		return PTR_ERR(trans);
2232	}
2233	return btrfs_commit_transaction(trans);
2234}
2235
2236static int btrfs_unfreeze(struct super_block *sb)
2237{
2238	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2239
2240	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2241	return 0;
2242}
2243
2244static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2245{
2246	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2247	struct btrfs_fs_devices *cur_devices;
2248	struct btrfs_device *dev, *first_dev = NULL;
2249	struct list_head *head;
2250
2251	/*
2252	 * Lightweight locking of the devices. We should not need
2253	 * device_list_mutex here as we only read the device data and the list
2254	 * is protected by RCU.  Even if a device is deleted during the list
2255	 * traversal, we'll get valid data; the freeing callback will wait at
2256	 * least until the rcu_read_unlock.
2257	 */
2258	rcu_read_lock();
2259	cur_devices = fs_info->fs_devices;
2260	while (cur_devices) {
2261		head = &cur_devices->devices;
2262		list_for_each_entry_rcu(dev, head, dev_list) {
2263			if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2264				continue;
2265			if (!dev->name)
2266				continue;
2267			if (!first_dev || dev->devid < first_dev->devid)
2268				first_dev = dev;
2269		}
2270		cur_devices = cur_devices->seed;
2271	}
2272
2273	if (first_dev)
2274		seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
2275	else
2276		WARN_ON(1);
2277	rcu_read_unlock();
2278	return 0;
2279}
2280
2281static const struct super_operations btrfs_super_ops = {
2282	.drop_inode	= btrfs_drop_inode,
2283	.evict_inode	= btrfs_evict_inode,
2284	.put_super	= btrfs_put_super,
2285	.sync_fs	= btrfs_sync_fs,
2286	.show_options	= btrfs_show_options,
2287	.show_devname	= btrfs_show_devname,
2288	.alloc_inode	= btrfs_alloc_inode,
2289	.destroy_inode	= btrfs_destroy_inode,
2290	.free_inode	= btrfs_free_inode,
2291	.statfs		= btrfs_statfs,
2292	.remount_fs	= btrfs_remount,
2293	.freeze_fs	= btrfs_freeze,
2294	.unfreeze_fs	= btrfs_unfreeze,
2295};
2296
2297static const struct file_operations btrfs_ctl_fops = {
2298	.open = btrfs_control_open,
2299	.unlocked_ioctl	 = btrfs_control_ioctl,
2300	.compat_ioctl = btrfs_control_ioctl,
2301	.owner	 = THIS_MODULE,
2302	.llseek = noop_llseek,
2303};
2304
2305static struct miscdevice btrfs_misc = {
2306	.minor		= BTRFS_MINOR,
2307	.name		= "btrfs-control",
2308	.fops		= &btrfs_ctl_fops
2309};
2310
2311MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2312MODULE_ALIAS("devname:btrfs-control");
2313
2314static int __init btrfs_interface_init(void)
2315{
2316	return misc_register(&btrfs_misc);
2317}
2318
2319static __cold void btrfs_interface_exit(void)
2320{
2321	misc_deregister(&btrfs_misc);
2322}
2323
2324static void __init btrfs_print_mod_info(void)
2325{
2326	static const char options[] = ""
2327#ifdef CONFIG_BTRFS_DEBUG
2328			", debug=on"
2329#endif
2330#ifdef CONFIG_BTRFS_ASSERT
2331			", assert=on"
2332#endif
2333#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2334			", integrity-checker=on"
2335#endif
2336#ifdef CONFIG_BTRFS_FS_REF_VERIFY
2337			", ref-verify=on"
2338#endif
2339			;
2340	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
2341}
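
/*
 * Illustrative sketch (not part of the kernel source): the options[] string
 * above is assembled at compile time from adjacent string literals guarded by
 * #ifdef, so the "Btrfs loaded" banner only lists features built into this
 * kernel.  The same technique in a stand-alone program with hypothetical
 * feature macros:
 */
#include <stdio.h>

#define DEMO_FEATURE_FOO	/* pretend this came from the build config */

int main(void)
{
	static const char features[] = ""
#ifdef DEMO_FEATURE_FOO
			", foo=on"
#endif
#ifdef DEMO_FEATURE_BAR
			", bar=on"
#endif
			;

	printf("demo loaded%s\n", features);	/* "demo loaded, foo=on" */
	return 0;
}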
2342
2343static int __init init_btrfs_fs(void)
2344{
2345	int err;
2346
2347	btrfs_props_init();
2348
2349	err = btrfs_init_sysfs();
2350	if (err)
2351		return err;
2352
2353	btrfs_init_compress();
2354
2355	err = btrfs_init_cachep();
2356	if (err)
2357		goto free_compress;
2358
2359	err = extent_io_init();
2360	if (err)
2361		goto free_cachep;
2362
2363	err = extent_map_init();
2364	if (err)
2365		goto free_extent_io;
2366
2367	err = ordered_data_init();
2368	if (err)
2369		goto free_extent_map;
2370
2371	err = btrfs_delayed_inode_init();
2372	if (err)
2373		goto free_ordered_data;
2374
2375	err = btrfs_auto_defrag_init();
2376	if (err)
2377		goto free_delayed_inode;
2378
2379	err = btrfs_delayed_ref_init();
2380	if (err)
2381		goto free_auto_defrag;
2382
2383	err = btrfs_prelim_ref_init();
2384	if (err)
2385		goto free_delayed_ref;
2386
2387	err = btrfs_end_io_wq_init();
2388	if (err)
2389		goto free_prelim_ref;
2390
2391	err = btrfs_interface_init();
2392	if (err)
2393		goto free_end_io_wq;
2394
2395	btrfs_init_lockdep();
2396
2397	btrfs_print_mod_info();
2398
2399	err = btrfs_run_sanity_tests();
2400	if (err)
2401		goto unregister_ioctl;
2402
2403	err = register_filesystem(&btrfs_fs_type);
2404	if (err)
2405		goto unregister_ioctl;
2406
2407	return 0;
2408
2409unregister_ioctl:
2410	btrfs_interface_exit();
2411free_end_io_wq:
2412	btrfs_end_io_wq_exit();
2413free_prelim_ref:
2414	btrfs_prelim_ref_exit();
2415free_delayed_ref:
2416	btrfs_delayed_ref_exit();
2417free_auto_defrag:
2418	btrfs_auto_defrag_exit();
2419free_delayed_inode:
2420	btrfs_delayed_inode_exit();
2421free_ordered_data:
2422	ordered_data_exit();
2423free_extent_map:
2424	extent_map_exit();
2425free_extent_io:
2426	extent_io_exit();
2427free_cachep:
2428	btrfs_destroy_cachep();
2429free_compress:
2430	btrfs_exit_compress();
2431	btrfs_exit_sysfs();
2432
2433	return err;
2434}
2435
2436static void __exit exit_btrfs_fs(void)
2437{
2438	btrfs_destroy_cachep();
2439	btrfs_delayed_ref_exit();
2440	btrfs_auto_defrag_exit();
2441	btrfs_delayed_inode_exit();
2442	btrfs_prelim_ref_exit();
2443	ordered_data_exit();
2444	extent_map_exit();
2445	extent_io_exit();
2446	btrfs_interface_exit();
2447	btrfs_end_io_wq_exit();
2448	unregister_filesystem(&btrfs_fs_type);
2449	btrfs_exit_sysfs();
2450	btrfs_cleanup_fs_uuids();
2451	btrfs_exit_compress();
2452}
2453
2454late_initcall(init_btrfs_fs);
2455module_exit(exit_btrfs_fs)
2456
2457MODULE_LICENSE("GPL");
2458MODULE_SOFTDEP("pre: crc32c");
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/module.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/highmem.h>
  11#include <linux/time.h>
  12#include <linux/init.h>
  13#include <linux/seq_file.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/mount.h>
  17#include <linux/writeback.h>
  18#include <linux/statfs.h>
  19#include <linux/compat.h>
  20#include <linux/parser.h>
  21#include <linux/ctype.h>
  22#include <linux/namei.h>
  23#include <linux/miscdevice.h>
  24#include <linux/magic.h>
  25#include <linux/slab.h>
  26#include <linux/cleancache.h>
  27#include <linux/ratelimit.h>
  28#include <linux/crc32c.h>
  29#include <linux/btrfs.h>
  30#include "delayed-inode.h"
  31#include "ctree.h"
  32#include "disk-io.h"
  33#include "transaction.h"
  34#include "btrfs_inode.h"
  35#include "print-tree.h"
  36#include "props.h"
  37#include "xattr.h"
  38#include "volumes.h"
  39#include "export.h"
  40#include "compression.h"
  41#include "rcu-string.h"
  42#include "dev-replace.h"
  43#include "free-space-cache.h"
  44#include "backref.h"
  45#include "space-info.h"
  46#include "sysfs.h"
  47#include "tests/btrfs-tests.h"
  48#include "block-group.h"
  49#include "discard.h"
  50
  51#include "qgroup.h"
  52#define CREATE_TRACE_POINTS
  53#include <trace/events/btrfs.h>
  54
  55static const struct super_operations btrfs_super_ops;
  56
  57/*
  58 * Types for mounting the default subvolume and a subvolume explicitly
  59 * requested by subvol=/path. That way the callchain is straightforward and we
  60 * don't have to play tricks with the mount options and recursive calls to
  61 * btrfs_mount.
  62 *
   63 * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
  64 */
  65static struct file_system_type btrfs_fs_type;
  66static struct file_system_type btrfs_root_fs_type;
  67
  68static int btrfs_remount(struct super_block *sb, int *flags, char *data);
  69
  70/*
  71 * Generally the error codes correspond to their respective errors, but there
  72 * are a few special cases.
  73 *
  74 * EUCLEAN: Any sort of corruption that we encounter.  The tree-checker for
  75 *          instance will return EUCLEAN if any of the blocks are corrupted in
  76 *          a way that is problematic.  We want to reserve EUCLEAN for these
  77 *          sort of corruptions.
  78 *
  79 * EROFS: If we check BTRFS_FS_STATE_ERROR and fail out with a return error, we
  80 *        need to use EROFS for this case.  We will have no idea of the
  81 *        original failure, that will have been reported at the time we tripped
  82 *        over the error.  Each subsequent error that doesn't have any context
  83 *        of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
  84 */
  85const char * __attribute_const__ btrfs_decode_error(int errno)
  86{
  87	char *errstr = "unknown";
  88
  89	switch (errno) {
  90	case -ENOENT:		/* -2 */
  91		errstr = "No such entry";
  92		break;
  93	case -EIO:		/* -5 */
  94		errstr = "IO failure";
  95		break;
   96	case -ENOMEM:		/* -12 */
  97		errstr = "Out of memory";
  98		break;
  99	case -EEXIST:		/* -17 */
 100		errstr = "Object already exists";
 101		break;
 102	case -ENOSPC:		/* -28 */
 103		errstr = "No space left";
 104		break;
 105	case -EROFS:		/* -30 */
 106		errstr = "Readonly filesystem";
 107		break;
 108	case -EOPNOTSUPP:	/* -95 */
 109		errstr = "Operation not supported";
 110		break;
 111	case -EUCLEAN:		/* -117 */
 112		errstr = "Filesystem corrupted";
 113		break;
 114	case -EDQUOT:		/* -122 */
 115		errstr = "Quota exceeded";
 116		break;
 117	}
 118
 119	return errstr;
 120}
 121
 122/*
 123 * __btrfs_handle_fs_error decodes expected errors from the caller and
 124 * invokes the appropriate error response.
 125 */
 126__cold
 127void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
 128		       unsigned int line, int errno, const char *fmt, ...)
 129{
 130	struct super_block *sb = fs_info->sb;
 131#ifdef CONFIG_PRINTK
 132	const char *errstr;
 133#endif
 134
 135	/*
 136	 * Special case: if the error is EROFS, and we're already
 137	 * under SB_RDONLY, then it is safe here.
 138	 */
 139	if (errno == -EROFS && sb_rdonly(sb))
 140  		return;
 141
 142#ifdef CONFIG_PRINTK
 143	errstr = btrfs_decode_error(errno);
 144	if (fmt) {
 145		struct va_format vaf;
 146		va_list args;
 147
 148		va_start(args, fmt);
 149		vaf.fmt = fmt;
 150		vaf.va = &args;
 151
 152		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 153			sb->s_id, function, line, errno, errstr, &vaf);
 154		va_end(args);
 155	} else {
 156		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 157			sb->s_id, function, line, errno, errstr);
 158	}
 159#endif
 160
 161	/*
 162	 * Today we only save the error info to memory.  Long term we'll
 163	 * also send it down to the disk
 164	 */
 165	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 166
 167	/* Don't go through full error handling during mount */
 168	if (!(sb->s_flags & SB_BORN))
 169		return;
 170
 171	if (sb_rdonly(sb))
 172		return;
 173
 174	btrfs_discard_stop(fs_info);
 175
  176	/* btrfs handles errors by forcing the filesystem read-only */
 177	sb->s_flags |= SB_RDONLY;
 178	btrfs_info(fs_info, "forced readonly");
 179	/*
 180	 * Note that a running device replace operation is not canceled here
 181	 * although there is no way to update the progress. It would add the
 182	 * risk of a deadlock, therefore the canceling is omitted. The only
 183	 * penalty is that some I/O remains active until the procedure
 184	 * completes. The next time when the filesystem is mounted writable
 185	 * again, the device replace operation continues.
 186	 */
 187}
 188
 189#ifdef CONFIG_PRINTK
 190static const char * const logtypes[] = {
 191	"emergency",
 192	"alert",
 193	"critical",
 194	"error",
 195	"warning",
 196	"notice",
 197	"info",
 198	"debug",
 199};
 200
 201
 202/*
 203 * Use one ratelimit state per log level so that a flood of less important
 204 * messages doesn't cause more important ones to be dropped.
 205 */
 206static struct ratelimit_state printk_limits[] = {
 207	RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
 208	RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
 209	RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
 210	RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
 211	RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
 212	RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
 213	RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
 214	RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
 215};
 216
 217void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 218{
 219	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
 220	struct va_format vaf;
 221	va_list args;
 222	int kern_level;
 223	const char *type = logtypes[4];
 224	struct ratelimit_state *ratelimit = &printk_limits[4];
 225
 226	va_start(args, fmt);
 227
 228	while ((kern_level = printk_get_level(fmt)) != 0) {
 229		size_t size = printk_skip_level(fmt) - fmt;
 230
 231		if (kern_level >= '0' && kern_level <= '7') {
 232			memcpy(lvl, fmt,  size);
 233			lvl[size] = '\0';
 234			type = logtypes[kern_level - '0'];
 235			ratelimit = &printk_limits[kern_level - '0'];
 236		}
 237		fmt += size;
 238	}
 239
 240	vaf.fmt = fmt;
 241	vaf.va = &args;
 242
 243	if (__ratelimit(ratelimit))
 244		printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
 245			fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
 246
 247	va_end(args);
 248}
 249#endif
 250
 251/*
 252 * We only mark the transaction aborted and then set the file system read-only.
 253 * This will prevent new transactions from starting or trying to join this
 254 * one.
 255 *
 256 * This means that error recovery at the call site is limited to freeing
 257 * any local memory allocations and passing the error code up without
 258 * further cleanup. The transaction should complete as it normally would
 259 * in the call path but will return -EIO.
 260 *
 261 * We'll complete the cleanup in btrfs_end_transaction and
 262 * btrfs_commit_transaction.
 263 */
 264__cold
 265void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 266			       const char *function,
 267			       unsigned int line, int errno)
 268{
 269	struct btrfs_fs_info *fs_info = trans->fs_info;
 270
 271	WRITE_ONCE(trans->aborted, errno);
 272	/* Nothing used. The other threads that have joined this
 273	 * transaction may be able to continue. */
 274	if (!trans->dirty && list_empty(&trans->new_bgs)) {
 275		const char *errstr;
 276
 277		errstr = btrfs_decode_error(errno);
 278		btrfs_warn(fs_info,
 279		           "%s:%d: Aborting unused transaction(%s).",
 280		           function, line, errstr);
 281		return;
 282	}
 283	WRITE_ONCE(trans->transaction->aborted, errno);
 284	/* Wake up anybody who may be waiting on this transaction */
 285	wake_up(&fs_info->transaction_wait);
 286	wake_up(&fs_info->transaction_blocked_wait);
 287	__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
 288}
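
/*
 * Illustrative note (not part of the kernel source): callers normally reach
 * the function above through the btrfs_abort_transaction() macro, and the
 * usual call-site shape is "abort on the first error and bail out".  A
 * schematic, hypothetical caller:
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		goto out;
 *	}
 *
 * After the abort the transaction is poisoned: later joiners see ->aborted
 * set, and the final commit returns an error instead of writing a new
 * superblock.
 */
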
 289/*
 290 * __btrfs_panic decodes unexpected, fatal errors from the caller,
 291 * issues an alert, and either panics or BUGs, depending on mount options.
 292 */
 293__cold
 294void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 295		   unsigned int line, int errno, const char *fmt, ...)
 296{
 297	char *s_id = "<unknown>";
 298	const char *errstr;
 299	struct va_format vaf = { .fmt = fmt };
 300	va_list args;
 301
 302	if (fs_info)
 303		s_id = fs_info->sb->s_id;
 304
 305	va_start(args, fmt);
 306	vaf.va = &args;
 307
 308	errstr = btrfs_decode_error(errno);
 309	if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
 310		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
 311			s_id, function, line, &vaf, errno, errstr);
 312
 313	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
 314		   function, line, &vaf, errno, errstr);
 315	va_end(args);
 316	/* Caller calls BUG() */
 317}
 318
 319static void btrfs_put_super(struct super_block *sb)
 320{
 321	close_ctree(btrfs_sb(sb));
 322}
 323
 324enum {
 325	Opt_acl, Opt_noacl,
 326	Opt_clear_cache,
 327	Opt_commit_interval,
 328	Opt_compress,
 329	Opt_compress_force,
 330	Opt_compress_force_type,
 331	Opt_compress_type,
 332	Opt_degraded,
 333	Opt_device,
 334	Opt_fatal_errors,
 335	Opt_flushoncommit, Opt_noflushoncommit,
 336	Opt_inode_cache, Opt_noinode_cache,
 337	Opt_max_inline,
 338	Opt_barrier, Opt_nobarrier,
 339	Opt_datacow, Opt_nodatacow,
 340	Opt_datasum, Opt_nodatasum,
 341	Opt_defrag, Opt_nodefrag,
 342	Opt_discard, Opt_nodiscard,
 343	Opt_discard_mode,
 344	Opt_norecovery,
 345	Opt_ratio,
 346	Opt_rescan_uuid_tree,
 347	Opt_skip_balance,
 348	Opt_space_cache, Opt_no_space_cache,
 349	Opt_space_cache_version,
 350	Opt_ssd, Opt_nossd,
 351	Opt_ssd_spread, Opt_nossd_spread,
 352	Opt_subvol,
 353	Opt_subvol_empty,
 354	Opt_subvolid,
 355	Opt_thread_pool,
 356	Opt_treelog, Opt_notreelog,
 357	Opt_user_subvol_rm_allowed,
 358
 359	/* Rescue options */
 360	Opt_rescue,
 361	Opt_usebackuproot,
 362	Opt_nologreplay,
 363
 364	/* Deprecated options */
 365	Opt_recovery,
 366
 367	/* Debugging options */
 368	Opt_check_integrity,
 369	Opt_check_integrity_including_extent_data,
 370	Opt_check_integrity_print_mask,
 371	Opt_enospc_debug, Opt_noenospc_debug,
 372#ifdef CONFIG_BTRFS_DEBUG
 373	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
 374#endif
 375#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 376	Opt_ref_verify,
 377#endif
 378	Opt_err,
 379};
 380
 381static const match_table_t tokens = {
 382	{Opt_acl, "acl"},
 383	{Opt_noacl, "noacl"},
 384	{Opt_clear_cache, "clear_cache"},
 385	{Opt_commit_interval, "commit=%u"},
 386	{Opt_compress, "compress"},
 387	{Opt_compress_type, "compress=%s"},
 388	{Opt_compress_force, "compress-force"},
 389	{Opt_compress_force_type, "compress-force=%s"},
 390	{Opt_degraded, "degraded"},
 391	{Opt_device, "device=%s"},
 392	{Opt_fatal_errors, "fatal_errors=%s"},
 393	{Opt_flushoncommit, "flushoncommit"},
 394	{Opt_noflushoncommit, "noflushoncommit"},
 395	{Opt_inode_cache, "inode_cache"},
 396	{Opt_noinode_cache, "noinode_cache"},
 397	{Opt_max_inline, "max_inline=%s"},
 398	{Opt_barrier, "barrier"},
 399	{Opt_nobarrier, "nobarrier"},
 400	{Opt_datacow, "datacow"},
 401	{Opt_nodatacow, "nodatacow"},
 402	{Opt_datasum, "datasum"},
 403	{Opt_nodatasum, "nodatasum"},
 404	{Opt_defrag, "autodefrag"},
 405	{Opt_nodefrag, "noautodefrag"},
 406	{Opt_discard, "discard"},
 407	{Opt_discard_mode, "discard=%s"},
 408	{Opt_nodiscard, "nodiscard"},
 409	{Opt_norecovery, "norecovery"},
 410	{Opt_ratio, "metadata_ratio=%u"},
 411	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 412	{Opt_skip_balance, "skip_balance"},
 413	{Opt_space_cache, "space_cache"},
 414	{Opt_no_space_cache, "nospace_cache"},
 415	{Opt_space_cache_version, "space_cache=%s"},
 416	{Opt_ssd, "ssd"},
 417	{Opt_nossd, "nossd"},
 418	{Opt_ssd_spread, "ssd_spread"},
 419	{Opt_nossd_spread, "nossd_spread"},
 420	{Opt_subvol, "subvol=%s"},
 421	{Opt_subvol_empty, "subvol="},
 422	{Opt_subvolid, "subvolid=%s"},
 423	{Opt_thread_pool, "thread_pool=%u"},
 424	{Opt_treelog, "treelog"},
 425	{Opt_notreelog, "notreelog"},
 426	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 427
 428	/* Rescue options */
 429	{Opt_rescue, "rescue=%s"},
 430	/* Deprecated, with alias rescue=nologreplay */
 431	{Opt_nologreplay, "nologreplay"},
 432	/* Deprecated, with alias rescue=usebackuproot */
 433	{Opt_usebackuproot, "usebackuproot"},
 434
 435	/* Deprecated options */
 436	{Opt_recovery, "recovery"},
 437
 438	/* Debugging options */
 439	{Opt_check_integrity, "check_int"},
 440	{Opt_check_integrity_including_extent_data, "check_int_data"},
 441	{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
 442	{Opt_enospc_debug, "enospc_debug"},
 443	{Opt_noenospc_debug, "noenospc_debug"},
 444#ifdef CONFIG_BTRFS_DEBUG
 445	{Opt_fragment_data, "fragment=data"},
 446	{Opt_fragment_metadata, "fragment=metadata"},
 447	{Opt_fragment_all, "fragment=all"},
 448#endif
 449#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 450	{Opt_ref_verify, "ref_verify"},
 451#endif
 452	{Opt_err, NULL},
 453};
 454
 455static const match_table_t rescue_tokens = {
 456	{Opt_usebackuproot, "usebackuproot"},
 457	{Opt_nologreplay, "nologreplay"},
 458	{Opt_err, NULL},
 459};
 460
 461static int parse_rescue_options(struct btrfs_fs_info *info, const char *options)
 462{
 463	char *opts;
 464	char *orig;
 465	char *p;
 466	substring_t args[MAX_OPT_ARGS];
 467	int ret = 0;
 468
 469	opts = kstrdup(options, GFP_KERNEL);
 470	if (!opts)
 471		return -ENOMEM;
 472	orig = opts;
 473
 474	while ((p = strsep(&opts, ":")) != NULL) {
 475		int token;
 476
 477		if (!*p)
 478			continue;
 479		token = match_token(p, rescue_tokens, args);
 480		switch (token){
 481		case Opt_usebackuproot:
 482			btrfs_info(info,
 483				   "trying to use backup root at mount time");
 484			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 485			break;
 486		case Opt_nologreplay:
 487			btrfs_set_and_info(info, NOLOGREPLAY,
 488					   "disabling log replay at mount time");
 489			break;
 490		case Opt_err:
 491			btrfs_info(info, "unrecognized rescue option '%s'", p);
 492			ret = -EINVAL;
 493			goto out;
 494		default:
 495			break;
 496		}
 497
 498	}
 499out:
 500	kfree(orig);
 501	return ret;
 502}
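
/*
 * Illustrative sketch (not part of the kernel source): rescue options arrive
 * as a single mount option value with ':' separated keywords, e.g.
 * "rescue=nologreplay:usebackuproot", and parse_rescue_options() above walks a
 * duplicated copy with strsep().  A stand-alone model of that tokenizing loop,
 * with hypothetical names:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_rescue_like(const char *options)
{
	char *opts, *orig, *p;

	opts = strdup(options);		/* strsep() modifies the buffer */
	if (!opts)
		return -1;
	orig = opts;

	while ((p = strsep(&opts, ":")) != NULL) {
		if (!*p)
			continue;	/* skip empty tokens such as "a::b" */
		printf("rescue keyword: %s\n", p);
	}

	free(orig);
	return 0;
}

int main(void)
{
	return parse_rescue_like("nologreplay:usebackuproot");
}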
 503
 504/*
 505 * Regular mount options parser.  Everything that is needed only when
 506 * reading in a new superblock is parsed here.
 507 * XXX JDM: This needs to be cleaned up for remount.
 508 */
 509int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 510			unsigned long new_flags)
 511{
 512	substring_t args[MAX_OPT_ARGS];
 513	char *p, *num;
 514	u64 cache_gen;
 515	int intarg;
 516	int ret = 0;
 517	char *compress_type;
 518	bool compress_force = false;
 519	enum btrfs_compression_type saved_compress_type;
 520	int saved_compress_level;
 521	bool saved_compress_force;
 522	int no_compress = 0;
 523
 524	cache_gen = btrfs_super_cache_generation(info->super_copy);
 525	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
 526		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
 527	else if (cache_gen)
 528		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 529
 530	/*
  531	 * Even if the options are empty, we still need to do extra checks
  532	 * against the new flags
 533	 */
 534	if (!options)
 535		goto check;
 536
 537	while ((p = strsep(&options, ",")) != NULL) {
 538		int token;
 539		if (!*p)
 540			continue;
 541
 542		token = match_token(p, tokens, args);
 543		switch (token) {
 544		case Opt_degraded:
 545			btrfs_info(info, "allowing degraded mounts");
 546			btrfs_set_opt(info->mount_opt, DEGRADED);
 547			break;
 548		case Opt_subvol:
 549		case Opt_subvol_empty:
 550		case Opt_subvolid:
 551		case Opt_device:
 552			/*
 553			 * These are parsed by btrfs_parse_subvol_options or
 554			 * btrfs_parse_device_options and can be ignored here.
 555			 */
 556			break;
 557		case Opt_nodatasum:
 558			btrfs_set_and_info(info, NODATASUM,
 559					   "setting nodatasum");
 560			break;
 561		case Opt_datasum:
 562			if (btrfs_test_opt(info, NODATASUM)) {
 563				if (btrfs_test_opt(info, NODATACOW))
 564					btrfs_info(info,
 565						   "setting datasum, datacow enabled");
 566				else
 567					btrfs_info(info, "setting datasum");
 568			}
 569			btrfs_clear_opt(info->mount_opt, NODATACOW);
 570			btrfs_clear_opt(info->mount_opt, NODATASUM);
 571			break;
 572		case Opt_nodatacow:
 573			if (!btrfs_test_opt(info, NODATACOW)) {
 574				if (!btrfs_test_opt(info, COMPRESS) ||
 575				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
 576					btrfs_info(info,
 577						   "setting nodatacow, compression disabled");
 578				} else {
 579					btrfs_info(info, "setting nodatacow");
 580				}
 581			}
 582			btrfs_clear_opt(info->mount_opt, COMPRESS);
 583			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 584			btrfs_set_opt(info->mount_opt, NODATACOW);
 585			btrfs_set_opt(info->mount_opt, NODATASUM);
 586			break;
 587		case Opt_datacow:
 588			btrfs_clear_and_info(info, NODATACOW,
 589					     "setting datacow");
 590			break;
 591		case Opt_compress_force:
 592		case Opt_compress_force_type:
 593			compress_force = true;
 594			fallthrough;
 595		case Opt_compress:
 596		case Opt_compress_type:
 597			saved_compress_type = btrfs_test_opt(info,
 598							     COMPRESS) ?
 599				info->compress_type : BTRFS_COMPRESS_NONE;
 600			saved_compress_force =
 601				btrfs_test_opt(info, FORCE_COMPRESS);
 602			saved_compress_level = info->compress_level;
 603			if (token == Opt_compress ||
 604			    token == Opt_compress_force ||
 605			    strncmp(args[0].from, "zlib", 4) == 0) {
 606				compress_type = "zlib";
 607
 608				info->compress_type = BTRFS_COMPRESS_ZLIB;
 609				info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
 610				/*
 611				 * args[0] contains uninitialized data since
 612				 * for these tokens we don't expect any
 613				 * parameter.
 614				 */
 615				if (token != Opt_compress &&
 616				    token != Opt_compress_force)
 617					info->compress_level =
 618					  btrfs_compress_str2level(
 619							BTRFS_COMPRESS_ZLIB,
 620							args[0].from + 4);
 621				btrfs_set_opt(info->mount_opt, COMPRESS);
 622				btrfs_clear_opt(info->mount_opt, NODATACOW);
 623				btrfs_clear_opt(info->mount_opt, NODATASUM);
 624				no_compress = 0;
 625			} else if (strncmp(args[0].from, "lzo", 3) == 0) {
 626				compress_type = "lzo";
 627				info->compress_type = BTRFS_COMPRESS_LZO;
 628				info->compress_level = 0;
 629				btrfs_set_opt(info->mount_opt, COMPRESS);
 630				btrfs_clear_opt(info->mount_opt, NODATACOW);
 631				btrfs_clear_opt(info->mount_opt, NODATASUM);
 632				btrfs_set_fs_incompat(info, COMPRESS_LZO);
 633				no_compress = 0;
 634			} else if (strncmp(args[0].from, "zstd", 4) == 0) {
 635				compress_type = "zstd";
 636				info->compress_type = BTRFS_COMPRESS_ZSTD;
 637				info->compress_level =
 638					btrfs_compress_str2level(
 639							 BTRFS_COMPRESS_ZSTD,
 640							 args[0].from + 4);
 641				btrfs_set_opt(info->mount_opt, COMPRESS);
 642				btrfs_clear_opt(info->mount_opt, NODATACOW);
 643				btrfs_clear_opt(info->mount_opt, NODATASUM);
 644				btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
 645				no_compress = 0;
 646			} else if (strncmp(args[0].from, "no", 2) == 0) {
 647				compress_type = "no";
 648				info->compress_level = 0;
 649				info->compress_type = 0;
 650				btrfs_clear_opt(info->mount_opt, COMPRESS);
 651				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 652				compress_force = false;
 653				no_compress++;
 654			} else {
 655				ret = -EINVAL;
 656				goto out;
 657			}
 658
 659			if (compress_force) {
 660				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
 661			} else {
 662				/*
 663				 * If we remount from compress-force=xxx to
  664				 * compress=xxx, we need to clear the FORCE_COMPRESS
 665				 * flag, otherwise, there is no way for users
 666				 * to disable forcible compression separately.
 667				 */
 668				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 669			}
 670			if (no_compress == 1) {
 671				btrfs_info(info, "use no compression");
 672			} else if ((info->compress_type != saved_compress_type) ||
 673				   (compress_force != saved_compress_force) ||
 674				   (info->compress_level != saved_compress_level)) {
 675				btrfs_info(info, "%s %s compression, level %d",
 676					   (compress_force) ? "force" : "use",
 677					   compress_type, info->compress_level);
 678			}
 679			compress_force = false;
 680			break;
 681		case Opt_ssd:
 682			btrfs_set_and_info(info, SSD,
 683					   "enabling ssd optimizations");
 684			btrfs_clear_opt(info->mount_opt, NOSSD);
 685			break;
 686		case Opt_ssd_spread:
 687			btrfs_set_and_info(info, SSD,
 688					   "enabling ssd optimizations");
 689			btrfs_set_and_info(info, SSD_SPREAD,
 690					   "using spread ssd allocation scheme");
 691			btrfs_clear_opt(info->mount_opt, NOSSD);
 692			break;
 693		case Opt_nossd:
 694			btrfs_set_opt(info->mount_opt, NOSSD);
 695			btrfs_clear_and_info(info, SSD,
 696					     "not using ssd optimizations");
 697			fallthrough;
 698		case Opt_nossd_spread:
 699			btrfs_clear_and_info(info, SSD_SPREAD,
 700					     "not using spread ssd allocation scheme");
 701			break;
 702		case Opt_barrier:
 703			btrfs_clear_and_info(info, NOBARRIER,
 704					     "turning on barriers");
 705			break;
 706		case Opt_nobarrier:
 707			btrfs_set_and_info(info, NOBARRIER,
 708					   "turning off barriers");
 709			break;
 710		case Opt_thread_pool:
 711			ret = match_int(&args[0], &intarg);
 712			if (ret) {
 713				goto out;
 714			} else if (intarg == 0) {
 715				ret = -EINVAL;
 716				goto out;
 717			}
 718			info->thread_pool_size = intarg;
 719			break;
 720		case Opt_max_inline:
 721			num = match_strdup(&args[0]);
 722			if (num) {
 723				info->max_inline = memparse(num, NULL);
 724				kfree(num);
 725
 726				if (info->max_inline) {
 727					info->max_inline = min_t(u64,
 728						info->max_inline,
 729						info->sectorsize);
 730				}
 731				btrfs_info(info, "max_inline at %llu",
 732					   info->max_inline);
 733			} else {
 734				ret = -ENOMEM;
 735				goto out;
 736			}
 737			break;
 738		case Opt_acl:
 739#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 740			info->sb->s_flags |= SB_POSIXACL;
 741			break;
 742#else
 743			btrfs_err(info, "support for ACL not compiled in!");
 744			ret = -EINVAL;
 745			goto out;
 746#endif
 747		case Opt_noacl:
 748			info->sb->s_flags &= ~SB_POSIXACL;
 749			break;
 750		case Opt_notreelog:
 751			btrfs_set_and_info(info, NOTREELOG,
 752					   "disabling tree log");
 753			break;
 754		case Opt_treelog:
 755			btrfs_clear_and_info(info, NOTREELOG,
 756					     "enabling tree log");
 757			break;
 758		case Opt_norecovery:
 759		case Opt_nologreplay:
 760			btrfs_warn(info,
 761		"'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
 762			btrfs_set_and_info(info, NOLOGREPLAY,
 763					   "disabling log replay at mount time");
 764			break;
 765		case Opt_flushoncommit:
 766			btrfs_set_and_info(info, FLUSHONCOMMIT,
 767					   "turning on flush-on-commit");
 768			break;
 769		case Opt_noflushoncommit:
 770			btrfs_clear_and_info(info, FLUSHONCOMMIT,
 771					     "turning off flush-on-commit");
 772			break;
 773		case Opt_ratio:
 774			ret = match_int(&args[0], &intarg);
 775			if (ret)
 776				goto out;
 777			info->metadata_ratio = intarg;
 778			btrfs_info(info, "metadata ratio %u",
 779				   info->metadata_ratio);
 780			break;
 781		case Opt_discard:
 782		case Opt_discard_mode:
 783			if (token == Opt_discard ||
 784			    strcmp(args[0].from, "sync") == 0) {
 785				btrfs_clear_opt(info->mount_opt, DISCARD_ASYNC);
 786				btrfs_set_and_info(info, DISCARD_SYNC,
 787						   "turning on sync discard");
 788			} else if (strcmp(args[0].from, "async") == 0) {
 789				btrfs_clear_opt(info->mount_opt, DISCARD_SYNC);
 790				btrfs_set_and_info(info, DISCARD_ASYNC,
 791						   "turning on async discard");
 792			} else {
 793				ret = -EINVAL;
 794				goto out;
 795			}
 796			break;
 797		case Opt_nodiscard:
 798			btrfs_clear_and_info(info, DISCARD_SYNC,
 799					     "turning off discard");
 800			btrfs_clear_and_info(info, DISCARD_ASYNC,
 801					     "turning off async discard");
 802			break;
 803		case Opt_space_cache:
 804		case Opt_space_cache_version:
 805			if (token == Opt_space_cache ||
 806			    strcmp(args[0].from, "v1") == 0) {
 807				btrfs_clear_opt(info->mount_opt,
 808						FREE_SPACE_TREE);
 809				btrfs_set_and_info(info, SPACE_CACHE,
 810					   "enabling disk space caching");
 811			} else if (strcmp(args[0].from, "v2") == 0) {
 812				btrfs_clear_opt(info->mount_opt,
 813						SPACE_CACHE);
 814				btrfs_set_and_info(info, FREE_SPACE_TREE,
 815						   "enabling free space tree");
 816			} else {
 817				ret = -EINVAL;
 818				goto out;
 819			}
 820			break;
 821		case Opt_rescan_uuid_tree:
 822			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 823			break;
 824		case Opt_no_space_cache:
 825			if (btrfs_test_opt(info, SPACE_CACHE)) {
 826				btrfs_clear_and_info(info, SPACE_CACHE,
 827					     "disabling disk space caching");
 828			}
 829			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
 830				btrfs_clear_and_info(info, FREE_SPACE_TREE,
 831					     "disabling free space tree");
 832			}
 833			break;
 834		case Opt_inode_cache:
 835			btrfs_warn(info,
 836	"the 'inode_cache' option is deprecated and will have no effect from 5.11");
 837			btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
 838					   "enabling inode map caching");
 839			break;
 840		case Opt_noinode_cache:
 841			btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
 842					     "disabling inode map caching");
 843			break;
 844		case Opt_clear_cache:
 845			btrfs_set_and_info(info, CLEAR_CACHE,
 846					   "force clearing of disk cache");
 847			break;
 848		case Opt_user_subvol_rm_allowed:
 849			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 850			break;
 851		case Opt_enospc_debug:
 852			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
 853			break;
 854		case Opt_noenospc_debug:
 855			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
 856			break;
 857		case Opt_defrag:
 858			btrfs_set_and_info(info, AUTO_DEFRAG,
 859					   "enabling auto defrag");
 860			break;
 861		case Opt_nodefrag:
 862			btrfs_clear_and_info(info, AUTO_DEFRAG,
 863					     "disabling auto defrag");
 864			break;
 865		case Opt_recovery:
 866		case Opt_usebackuproot:
 867			btrfs_warn(info,
 868			"'%s' is deprecated, use 'rescue=usebackuproot' instead",
 869				   token == Opt_recovery ? "recovery" :
 870				   "usebackuproot");
 871			btrfs_info(info,
 872				   "trying to use backup root at mount time");
 873			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 874			break;
 875		case Opt_skip_balance:
 876			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
 877			break;
 878#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 879		case Opt_check_integrity_including_extent_data:
 880			btrfs_info(info,
 881				   "enabling check integrity including extent data");
 882			btrfs_set_opt(info->mount_opt,
 883				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
 884			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 885			break;
 886		case Opt_check_integrity:
 887			btrfs_info(info, "enabling check integrity");
 888			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 889			break;
 890		case Opt_check_integrity_print_mask:
 891			ret = match_int(&args[0], &intarg);
 892			if (ret)
 893				goto out;
 894			info->check_integrity_print_mask = intarg;
 895			btrfs_info(info, "check_integrity_print_mask 0x%x",
 896				   info->check_integrity_print_mask);
 897			break;
 898#else
 899		case Opt_check_integrity_including_extent_data:
 900		case Opt_check_integrity:
 901		case Opt_check_integrity_print_mask:
 902			btrfs_err(info,
 903				  "support for check_integrity* not compiled in!");
 904			ret = -EINVAL;
 905			goto out;
 906#endif
 907		case Opt_fatal_errors:
 908			if (strcmp(args[0].from, "panic") == 0)
 909				btrfs_set_opt(info->mount_opt,
 910					      PANIC_ON_FATAL_ERROR);
 911			else if (strcmp(args[0].from, "bug") == 0)
 912				btrfs_clear_opt(info->mount_opt,
 913					      PANIC_ON_FATAL_ERROR);
 914			else {
 915				ret = -EINVAL;
 916				goto out;
 917			}
 918			break;
 919		case Opt_commit_interval:
 920			intarg = 0;
 921			ret = match_int(&args[0], &intarg);
 922			if (ret)
 923				goto out;
 924			if (intarg == 0) {
 925				btrfs_info(info,
 926					   "using default commit interval %us",
 927					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 928				intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
 929			} else if (intarg > 300) {
 930				btrfs_warn(info, "excessive commit interval %d",
 931					   intarg);
 932			}
 933			info->commit_interval = intarg;
 934			break;
 935		case Opt_rescue:
 936			ret = parse_rescue_options(info, args[0].from);
 937			if (ret < 0)
 938				goto out;
 939			break;
 940#ifdef CONFIG_BTRFS_DEBUG
 941		case Opt_fragment_all:
 942			btrfs_info(info, "fragmenting all space");
 943			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 944			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
 945			break;
 946		case Opt_fragment_metadata:
 947			btrfs_info(info, "fragmenting metadata");
 948			btrfs_set_opt(info->mount_opt,
 949				      FRAGMENT_METADATA);
 950			break;
 951		case Opt_fragment_data:
 952			btrfs_info(info, "fragmenting data");
 953			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 954			break;
 955#endif
 956#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 957		case Opt_ref_verify:
 958			btrfs_info(info, "doing ref verification");
 959			btrfs_set_opt(info->mount_opt, REF_VERIFY);
 960			break;
 961#endif
 962		case Opt_err:
 963			btrfs_err(info, "unrecognized mount option '%s'", p);
 964			ret = -EINVAL;
 965			goto out;
 966		default:
 967			break;
 968		}
 969	}
 970check:
 971	/*
  972	 * Extra check of the current options against the current flags
 973	 */
 974	if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
 975		btrfs_err(info,
 976			  "nologreplay must be used with ro mount option");
 977		ret = -EINVAL;
 978	}
 979out:
 980	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
 981	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
 982	    !btrfs_test_opt(info, CLEAR_CACHE)) {
 983		btrfs_err(info, "cannot disable free space tree");
 984		ret = -EINVAL;
 985
 986	}
 987	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
 988		btrfs_info(info, "disk space caching is enabled");
 989	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
 990		btrfs_info(info, "using free space tree");
 991	return ret;
 992}
 993
 994/*
 995 * Parse mount options that are required early in the mount process.
 996 *
 997 * All other options will be parsed on much later in the mount process and
 998 * only when we need to allocate a new super block.
 999 */
1000static int btrfs_parse_device_options(const char *options, fmode_t flags,
1001				      void *holder)
1002{
1003	substring_t args[MAX_OPT_ARGS];
1004	char *device_name, *opts, *orig, *p;
1005	struct btrfs_device *device = NULL;
1006	int error = 0;
1007
1008	lockdep_assert_held(&uuid_mutex);
1009
1010	if (!options)
1011		return 0;
1012
1013	/*
1014	 * strsep changes the string, duplicate it because btrfs_parse_options
1015	 * gets called later
1016	 */
1017	opts = kstrdup(options, GFP_KERNEL);
1018	if (!opts)
1019		return -ENOMEM;
1020	orig = opts;
1021
1022	while ((p = strsep(&opts, ",")) != NULL) {
1023		int token;
1024
1025		if (!*p)
1026			continue;
1027
1028		token = match_token(p, tokens, args);
1029		if (token == Opt_device) {
1030			device_name = match_strdup(&args[0]);
1031			if (!device_name) {
1032				error = -ENOMEM;
1033				goto out;
1034			}
1035			device = btrfs_scan_one_device(device_name, flags,
1036					holder);
1037			kfree(device_name);
1038			if (IS_ERR(device)) {
1039				error = PTR_ERR(device);
1040				goto out;
1041			}
1042		}
1043	}
1044
1045out:
1046	kfree(orig);
1047	return error;
1048}
1049
1050/*
1051 * Parse mount options that are related to subvolume id
1052 *
1053 * The value is later passed to mount_subvol()
1054 */
1055static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
1056		u64 *subvol_objectid)
1057{
1058	substring_t args[MAX_OPT_ARGS];
1059	char *opts, *orig, *p;
1060	int error = 0;
1061	u64 subvolid;
1062
1063	if (!options)
1064		return 0;
1065
1066	/*
1067	 * strsep changes the string, duplicate it because
1068	 * btrfs_parse_device_options gets called later
1069	 */
1070	opts = kstrdup(options, GFP_KERNEL);
1071	if (!opts)
1072		return -ENOMEM;
1073	orig = opts;
1074
1075	while ((p = strsep(&opts, ",")) != NULL) {
1076		int token;
1077		if (!*p)
1078			continue;
1079
1080		token = match_token(p, tokens, args);
1081		switch (token) {
1082		case Opt_subvol:
1083			kfree(*subvol_name);
1084			*subvol_name = match_strdup(&args[0]);
1085			if (!*subvol_name) {
1086				error = -ENOMEM;
1087				goto out;
1088			}
1089			break;
1090		case Opt_subvolid:
1091			error = match_u64(&args[0], &subvolid);
1092			if (error)
1093				goto out;
1094
1095			/* we want the original fs_tree */
1096			if (subvolid == 0)
1097				subvolid = BTRFS_FS_TREE_OBJECTID;
1098
1099			*subvol_objectid = subvolid;
1100			break;
1101		default:
1102			break;
1103		}
1104	}
1105
1106out:
1107	kfree(orig);
1108	return error;
1109}
1110
1111char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1112					  u64 subvol_objectid)
1113{
1114	struct btrfs_root *root = fs_info->tree_root;
1115	struct btrfs_root *fs_root = NULL;
1116	struct btrfs_root_ref *root_ref;
1117	struct btrfs_inode_ref *inode_ref;
1118	struct btrfs_key key;
1119	struct btrfs_path *path = NULL;
1120	char *name = NULL, *ptr;
1121	u64 dirid;
1122	int len;
1123	int ret;
1124
1125	path = btrfs_alloc_path();
1126	if (!path) {
1127		ret = -ENOMEM;
1128		goto err;
1129	}
1130	path->leave_spinning = 1;
1131
1132	name = kmalloc(PATH_MAX, GFP_KERNEL);
1133	if (!name) {
1134		ret = -ENOMEM;
1135		goto err;
1136	}
1137	ptr = name + PATH_MAX - 1;
1138	ptr[0] = '\0';
1139
1140	/*
1141	 * Walk up the subvolume trees in the tree of tree roots by root
1142	 * backrefs until we hit the top-level subvolume.
1143	 */
1144	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1145		key.objectid = subvol_objectid;
1146		key.type = BTRFS_ROOT_BACKREF_KEY;
1147		key.offset = (u64)-1;
1148
1149		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1150		if (ret < 0) {
1151			goto err;
1152		} else if (ret > 0) {
1153			ret = btrfs_previous_item(root, path, subvol_objectid,
1154						  BTRFS_ROOT_BACKREF_KEY);
1155			if (ret < 0) {
1156				goto err;
1157			} else if (ret > 0) {
1158				ret = -ENOENT;
1159				goto err;
1160			}
1161		}
1162
1163		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1164		subvol_objectid = key.offset;
1165
1166		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1167					  struct btrfs_root_ref);
1168		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1169		ptr -= len + 1;
1170		if (ptr < name) {
1171			ret = -ENAMETOOLONG;
1172			goto err;
1173		}
1174		read_extent_buffer(path->nodes[0], ptr + 1,
1175				   (unsigned long)(root_ref + 1), len);
1176		ptr[0] = '/';
1177		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1178		btrfs_release_path(path);
1179
1180		fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
1181		if (IS_ERR(fs_root)) {
1182			ret = PTR_ERR(fs_root);
1183			fs_root = NULL;
1184			goto err;
1185		}
1186
1187		/*
1188		 * Walk up the filesystem tree by inode refs until we hit the
1189		 * root directory.
1190		 */
1191		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1192			key.objectid = dirid;
1193			key.type = BTRFS_INODE_REF_KEY;
1194			key.offset = (u64)-1;
1195
1196			ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1197			if (ret < 0) {
1198				goto err;
1199			} else if (ret > 0) {
1200				ret = btrfs_previous_item(fs_root, path, dirid,
1201							  BTRFS_INODE_REF_KEY);
1202				if (ret < 0) {
1203					goto err;
1204				} else if (ret > 0) {
1205					ret = -ENOENT;
1206					goto err;
1207				}
1208			}
1209
1210			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1211			dirid = key.offset;
1212
1213			inode_ref = btrfs_item_ptr(path->nodes[0],
1214						   path->slots[0],
1215						   struct btrfs_inode_ref);
1216			len = btrfs_inode_ref_name_len(path->nodes[0],
1217						       inode_ref);
1218			ptr -= len + 1;
1219			if (ptr < name) {
1220				ret = -ENAMETOOLONG;
1221				goto err;
1222			}
1223			read_extent_buffer(path->nodes[0], ptr + 1,
1224					   (unsigned long)(inode_ref + 1), len);
1225			ptr[0] = '/';
1226			btrfs_release_path(path);
1227		}
1228		btrfs_put_root(fs_root);
1229		fs_root = NULL;
1230	}
1231
1232	btrfs_free_path(path);
1233	if (ptr == name + PATH_MAX - 1) {
1234		name[0] = '/';
1235		name[1] = '\0';
1236	} else {
1237		memmove(name, ptr, name + PATH_MAX - ptr);
1238	}
1239	return name;
1240
1241err:
1242	btrfs_put_root(fs_root);
1243	btrfs_free_path(path);
1244	kfree(name);
1245	return ERR_PTR(ret);
1246}
1247
1248static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1249{
1250	struct btrfs_root *root = fs_info->tree_root;
1251	struct btrfs_dir_item *di;
1252	struct btrfs_path *path;
1253	struct btrfs_key location;
1254	u64 dir_id;
1255
1256	path = btrfs_alloc_path();
1257	if (!path)
1258		return -ENOMEM;
1259	path->leave_spinning = 1;
1260
1261	/*
1262	 * Find the "default" dir item which points to the root item that we
1263	 * will mount by default if we haven't been given a specific subvolume
1264	 * to mount.
1265	 */
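	/*
	 * This "default" entry is what "btrfs subvolume set-default" updates;
	 * e.g. after "btrfs subvolume set-default 257 /mnt" (257 being an
	 * example id), a plain mount without subvol/subvolid options lands in
	 * that subvolume via the objectid returned here.
	 */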
1266	dir_id = btrfs_super_root_dir(fs_info->super_copy);
1267	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1268	if (IS_ERR(di)) {
1269		btrfs_free_path(path);
1270		return PTR_ERR(di);
1271	}
1272	if (!di) {
1273		/*
1274		 * Ok, the default dir item isn't there.  This is odd since it's
1275		 * always been there, but don't freak out, just try to mount the
1276		 * top-level subvolume.
1277		 */
1278		btrfs_free_path(path);
1279		*objectid = BTRFS_FS_TREE_OBJECTID;
1280		return 0;
1281	}
1282
1283	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1284	btrfs_free_path(path);
1285	*objectid = location.objectid;
1286	return 0;
1287}
1288
1289static int btrfs_fill_super(struct super_block *sb,
1290			    struct btrfs_fs_devices *fs_devices,
1291			    void *data)
1292{
1293	struct inode *inode;
1294	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1295	int err;
1296
1297	sb->s_maxbytes = MAX_LFS_FILESIZE;
1298	sb->s_magic = BTRFS_SUPER_MAGIC;
1299	sb->s_op = &btrfs_super_ops;
1300	sb->s_d_op = &btrfs_dentry_operations;
1301	sb->s_export_op = &btrfs_export_ops;
1302	sb->s_xattr = btrfs_xattr_handlers;
1303	sb->s_time_gran = 1;
1304#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1305	sb->s_flags |= SB_POSIXACL;
1306#endif
1307	sb->s_flags |= SB_I_VERSION;
1308	sb->s_iflags |= SB_I_CGROUPWB;
1309
1310	err = super_setup_bdi(sb);
1311	if (err) {
1312		btrfs_err(fs_info, "super_setup_bdi failed");
1313		return err;
1314	}
1315
1316	err = open_ctree(sb, fs_devices, (char *)data);
1317	if (err) {
1318		btrfs_err(fs_info, "open_ctree failed");
1319		return err;
1320	}
1321
1322	inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
1323	if (IS_ERR(inode)) {
1324		err = PTR_ERR(inode);
1325		goto fail_close;
1326	}
1327
1328	sb->s_root = d_make_root(inode);
1329	if (!sb->s_root) {
1330		err = -ENOMEM;
1331		goto fail_close;
1332	}
1333
1334	cleancache_init_fs(sb);
1335	sb->s_flags |= SB_ACTIVE;
1336	return 0;
1337
1338fail_close:
1339	close_ctree(fs_info);
1340	return err;
1341}
1342
1343int btrfs_sync_fs(struct super_block *sb, int wait)
1344{
1345	struct btrfs_trans_handle *trans;
1346	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1347	struct btrfs_root *root = fs_info->tree_root;
1348
1349	trace_btrfs_sync_fs(fs_info, wait);
1350
1351	if (!wait) {
1352		filemap_flush(fs_info->btree_inode->i_mapping);
1353		return 0;
1354	}
1355
1356	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1357
1358	trans = btrfs_attach_transaction_barrier(root);
1359	if (IS_ERR(trans)) {
1360		/* no transaction, don't bother */
1361		if (PTR_ERR(trans) == -ENOENT) {
1362			/*
1363			 * Exit unless we have some pending changes
1364			 * that need to go through commit
1365			 */
1366			if (fs_info->pending_changes == 0)
1367				return 0;
1368			/*
1369			 * A non-blocking test if the fs is frozen. We must not
1370			 * start a new transaction here otherwise a deadlock
1371			 * happens. The pending operations are delayed to the
1372			 * next commit after thawing.
1373			 */
1374			if (sb_start_write_trylock(sb))
1375				sb_end_write(sb);
1376			else
1377				return 0;
1378			trans = btrfs_start_transaction(root, 0);
1379		}
1380		if (IS_ERR(trans))
1381			return PTR_ERR(trans);
1382	}
1383	return btrfs_commit_transaction(trans);
1384}
1385
1386static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1387{
1388	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1389	const char *compress_type;
1390	const char *subvol_name;
1391
1392	if (btrfs_test_opt(info, DEGRADED))
1393		seq_puts(seq, ",degraded");
1394	if (btrfs_test_opt(info, NODATASUM))
1395		seq_puts(seq, ",nodatasum");
1396	if (btrfs_test_opt(info, NODATACOW))
1397		seq_puts(seq, ",nodatacow");
1398	if (btrfs_test_opt(info, NOBARRIER))
1399		seq_puts(seq, ",nobarrier");
1400	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1401		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1402	if (info->thread_pool_size !=  min_t(unsigned long,
1403					     num_online_cpus() + 2, 8))
1404		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1405	if (btrfs_test_opt(info, COMPRESS)) {
1406		compress_type = btrfs_compress_type2str(info->compress_type);
1407		if (btrfs_test_opt(info, FORCE_COMPRESS))
1408			seq_printf(seq, ",compress-force=%s", compress_type);
1409		else
1410			seq_printf(seq, ",compress=%s", compress_type);
1411		if (info->compress_level)
1412			seq_printf(seq, ":%d", info->compress_level);
1413	}
1414	if (btrfs_test_opt(info, NOSSD))
1415		seq_puts(seq, ",nossd");
1416	if (btrfs_test_opt(info, SSD_SPREAD))
1417		seq_puts(seq, ",ssd_spread");
1418	else if (btrfs_test_opt(info, SSD))
1419		seq_puts(seq, ",ssd");
1420	if (btrfs_test_opt(info, NOTREELOG))
1421		seq_puts(seq, ",notreelog");
1422	if (btrfs_test_opt(info, NOLOGREPLAY))
1423		seq_puts(seq, ",rescue=nologreplay");
1424	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1425		seq_puts(seq, ",flushoncommit");
1426	if (btrfs_test_opt(info, DISCARD_SYNC))
1427		seq_puts(seq, ",discard");
1428	if (btrfs_test_opt(info, DISCARD_ASYNC))
1429		seq_puts(seq, ",discard=async");
1430	if (!(info->sb->s_flags & SB_POSIXACL))
1431		seq_puts(seq, ",noacl");
1432	if (btrfs_test_opt(info, SPACE_CACHE))
1433		seq_puts(seq, ",space_cache");
1434	else if (btrfs_test_opt(info, FREE_SPACE_TREE))
1435		seq_puts(seq, ",space_cache=v2");
1436	else
1437		seq_puts(seq, ",nospace_cache");
1438	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1439		seq_puts(seq, ",rescan_uuid_tree");
1440	if (btrfs_test_opt(info, CLEAR_CACHE))
1441		seq_puts(seq, ",clear_cache");
1442	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1443		seq_puts(seq, ",user_subvol_rm_allowed");
1444	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1445		seq_puts(seq, ",enospc_debug");
1446	if (btrfs_test_opt(info, AUTO_DEFRAG))
1447		seq_puts(seq, ",autodefrag");
1448	if (btrfs_test_opt(info, INODE_MAP_CACHE))
1449		seq_puts(seq, ",inode_cache");
1450	if (btrfs_test_opt(info, SKIP_BALANCE))
1451		seq_puts(seq, ",skip_balance");
1452#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1453	if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
1454		seq_puts(seq, ",check_int_data");
1455	else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1456		seq_puts(seq, ",check_int");
1457	if (info->check_integrity_print_mask)
1458		seq_printf(seq, ",check_int_print_mask=%d",
1459				info->check_integrity_print_mask);
1460#endif
1461	if (info->metadata_ratio)
1462		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1463	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1464		seq_puts(seq, ",fatal_errors=panic");
1465	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1466		seq_printf(seq, ",commit=%u", info->commit_interval);
1467#ifdef CONFIG_BTRFS_DEBUG
1468	if (btrfs_test_opt(info, FRAGMENT_DATA))
1469		seq_puts(seq, ",fragment=data");
1470	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1471		seq_puts(seq, ",fragment=metadata");
1472#endif
1473	if (btrfs_test_opt(info, REF_VERIFY))
1474		seq_puts(seq, ",ref_verify");
1475	seq_printf(seq, ",subvolid=%llu",
1476		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1477	subvol_name = btrfs_get_subvol_name_from_objectid(info,
1478			BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1479	if (!IS_ERR(subvol_name)) {
1480		seq_puts(seq, ",subvol=");
1481		seq_escape(seq, subvol_name, " \t\n\\");
1482		kfree(subvol_name);
1483	}
1484	return 0;
1485}
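/*
 * As a rough illustration, the options emitted above show up in /proc/mounts
 * as something like:
 *
 *   /dev/sda1 /mnt btrfs rw,ssd,space_cache,subvolid=257,subvol=/snap 0 0
 *
 * where the device, mount point, subvolid and subvol are example values and
 * the exact option set depends on the mount flags and defaults in effect.
 */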
1486
1487static int btrfs_test_super(struct super_block *s, void *data)
1488{
1489	struct btrfs_fs_info *p = data;
1490	struct btrfs_fs_info *fs_info = btrfs_sb(s);
1491
1492	return fs_info->fs_devices == p->fs_devices;
1493}
1494
1495static int btrfs_set_super(struct super_block *s, void *data)
1496{
1497	int err = set_anon_super(s, data);
1498	if (!err)
1499		s->s_fs_info = data;
1500	return err;
1501}
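/*
 * The two callbacks above are used with sget() in btrfs_mount_root():
 * btrfs_test_super() matches an existing superblock by its fs_devices, so
 * mounting the same multi-device filesystem again (even via a different
 * member device path) reuses the already active super_block, while
 * btrfs_set_super() attaches the freshly prepared fs_info to a newly
 * allocated super_block.
 */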
1502
1503/*
1504 * Subvolumes are identified by inode number 256 (BTRFS_FIRST_FREE_OBJECTID).
1505 */
1506static inline int is_subvolume_inode(struct inode *inode)
1507{
1508	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1509		return 1;
1510	return 0;
1511}
1512
1513static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1514				   struct vfsmount *mnt)
1515{
1516	struct dentry *root;
1517	int ret;
1518
1519	if (!subvol_name) {
1520		if (!subvol_objectid) {
1521			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1522							  &subvol_objectid);
1523			if (ret) {
1524				root = ERR_PTR(ret);
1525				goto out;
1526			}
1527		}
1528		subvol_name = btrfs_get_subvol_name_from_objectid(
1529					btrfs_sb(mnt->mnt_sb), subvol_objectid);
1530		if (IS_ERR(subvol_name)) {
1531			root = ERR_CAST(subvol_name);
1532			subvol_name = NULL;
1533			goto out;
1534		}
1535
1536	}
1537
1538	root = mount_subtree(mnt, subvol_name);
1539	/* mount_subtree() drops our reference on the vfsmount. */
1540	mnt = NULL;
1541
1542	if (!IS_ERR(root)) {
1543		struct super_block *s = root->d_sb;
1544		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1545		struct inode *root_inode = d_inode(root);
1546		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1547
1548		ret = 0;
1549		if (!is_subvolume_inode(root_inode)) {
1550			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1551			       subvol_name);
1552			ret = -EINVAL;
1553		}
1554		if (subvol_objectid && root_objectid != subvol_objectid) {
1555			/*
1556			 * This will also catch a race condition where a
1557			 * subvolume which was passed by ID is renamed and
1558			 * another subvolume is renamed over the old location.
1559			 */
1560			btrfs_err(fs_info,
1561				  "subvol '%s' does not match subvolid %llu",
1562				  subvol_name, subvol_objectid);
1563			ret = -EINVAL;
1564		}
1565		if (ret) {
1566			dput(root);
1567			root = ERR_PTR(ret);
1568			deactivate_locked_super(s);
1569		}
1570	}
1571
1572out:
1573	mntput(mnt);
1574	kfree(subvol_name);
1575	return root;
1576}
1577
1578/*
1579 * Find a superblock for the given device / mount point.
1580 *
1581 * Note: This is based on mount_bdev from fs/super.c with a few additions
1582 *       for multiple device setup.  Make sure to keep it in sync.
1583 */
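/*
 * Roughly: parse the device related options, scan the given device to find
 * its fs_devices set, open all member devices, then use sget() with the
 * test/set callbacks above to either join an existing superblock for that
 * fs_devices or fill a new one via btrfs_fill_super().
 */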
1584static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1585		int flags, const char *device_name, void *data)
1586{
1587	struct block_device *bdev = NULL;
1588	struct super_block *s;
1589	struct btrfs_device *device = NULL;
1590	struct btrfs_fs_devices *fs_devices = NULL;
1591	struct btrfs_fs_info *fs_info = NULL;
1592	void *new_sec_opts = NULL;
1593	fmode_t mode = FMODE_READ;
1594	int error = 0;
1595
1596	if (!(flags & SB_RDONLY))
1597		mode |= FMODE_WRITE;
1598
1599	if (data) {
1600		error = security_sb_eat_lsm_opts(data, &new_sec_opts);
1601		if (error)
1602			return ERR_PTR(error);
1603	}
1604
1605	/*
1606	 * Set up a dummy root and fs_info for the test/set super callbacks.  We
1607	 * don't fill this out until open_ctree, but we need it so sget() can
1608	 * match an existing superblock; open_ctree will properly initialize the
1609	 * filesystem specific settings later.  btrfs_init_fs_info initializes
1610	 * the static elements of the fs_info (locks and such) to make cleanup
1611	 * easier if we find a superblock with our given fs_devices at sget() time.
1612	 */
1613	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1614	if (!fs_info) {
1615		error = -ENOMEM;
1616		goto error_sec_opts;
1617	}
1618	btrfs_init_fs_info(fs_info);
1619
1620	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1621	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1622	if (!fs_info->super_copy || !fs_info->super_for_commit) {
1623		error = -ENOMEM;
1624		goto error_fs_info;
1625	}
1626
1627	mutex_lock(&uuid_mutex);
1628	error = btrfs_parse_device_options(data, mode, fs_type);
1629	if (error) {
1630		mutex_unlock(&uuid_mutex);
1631		goto error_fs_info;
1632	}
1633
1634	device = btrfs_scan_one_device(device_name, mode, fs_type);
1635	if (IS_ERR(device)) {
1636		mutex_unlock(&uuid_mutex);
1637		error = PTR_ERR(device);
1638		goto error_fs_info;
1639	}
1640
1641	fs_devices = device->fs_devices;
1642	fs_info->fs_devices = fs_devices;
1643
1644	error = btrfs_open_devices(fs_devices, mode, fs_type);
1645	mutex_unlock(&uuid_mutex);
1646	if (error)
1647		goto error_fs_info;
1648
1649	if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1650		error = -EACCES;
1651		goto error_close_devices;
1652	}
1653
1654	bdev = fs_devices->latest_bdev;
1655	s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1656		 fs_info);
1657	if (IS_ERR(s)) {
1658		error = PTR_ERR(s);
1659		goto error_close_devices;
1660	}
1661
1662	if (s->s_root) {
1663		btrfs_close_devices(fs_devices);
1664		btrfs_free_fs_info(fs_info);
1665		if ((flags ^ s->s_flags) & SB_RDONLY)
1666			error = -EBUSY;
1667	} else {
1668		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1669		btrfs_sb(s)->bdev_holder = fs_type;
1670		if (!strstr(crc32c_impl(), "generic"))
1671			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1672		error = btrfs_fill_super(s, fs_devices, data);
1673	}
1674	if (!error)
1675		error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
1676	security_free_mnt_opts(&new_sec_opts);
1677	if (error) {
1678		deactivate_locked_super(s);
1679		return ERR_PTR(error);
1680	}
1681
1682	return dget(s->s_root);
1683
1684error_close_devices:
1685	btrfs_close_devices(fs_devices);
1686error_fs_info:
1687	btrfs_free_fs_info(fs_info);
1688error_sec_opts:
1689	security_free_mnt_opts(&new_sec_opts);
1690	return ERR_PTR(error);
1691}
1692
1693/*
1694 * Mount function which is called by VFS layer.
1695 *
1696 * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
1697 * which needs vfsmount* of device's root (/).  This means device's root has to
1698 * be mounted internally in any case.
1699 *
1700 * Operation flow:
1701 *   1. Parse subvol id related options for later use in mount_subvol().
1702 *
1703 *   2. Mount device's root (/) by calling vfs_kern_mount().
1704 *
1705 *      NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1706 *      first place. In order to avoid calling btrfs_mount() again, we use
1707 *      different file_system_type which is not registered to VFS by
1708 *      register_filesystem() (btrfs_root_fs_type). As a result,
1709 *      btrfs_mount_root() is called. The return value will be used by
1710 *      mount_subtree() in mount_subvol().
1711 *
1712 *   3. Call mount_subvol() to get the dentry of subvolume. Since there is
1713 *      "btrfs subvolume set-default", mount_subvol() is always called.
1714 */
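/*
 * Illustrative example: "mount -o subvol=/snap /dev/sda1 /mnt" (paths are
 * examples) first mounts the device root internally through
 * btrfs_root_fs_type/btrfs_mount_root(), and mount_subvol() then resolves
 * and returns the dentry of "/snap" within that internal mount.
 */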
1715static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1716		const char *device_name, void *data)
1717{
1718	struct vfsmount *mnt_root;
1719	struct dentry *root;
1720	char *subvol_name = NULL;
1721	u64 subvol_objectid = 0;
1722	int error = 0;
1723
1724	error = btrfs_parse_subvol_options(data, &subvol_name,
1725					&subvol_objectid);
1726	if (error) {
1727		kfree(subvol_name);
1728		return ERR_PTR(error);
1729	}
1730
1731	/* mount device's root (/) */
1732	mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
1733	if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1734		if (flags & SB_RDONLY) {
1735			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1736				flags & ~SB_RDONLY, device_name, data);
1737		} else {
1738			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1739				flags | SB_RDONLY, device_name, data);
1740			if (IS_ERR(mnt_root)) {
1741				root = ERR_CAST(mnt_root);
1742				kfree(subvol_name);
1743				goto out;
1744			}
1745
1746			down_write(&mnt_root->mnt_sb->s_umount);
1747			error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1748			up_write(&mnt_root->mnt_sb->s_umount);
1749			if (error < 0) {
1750				root = ERR_PTR(error);
1751				mntput(mnt_root);
1752				kfree(subvol_name);
1753				goto out;
1754			}
1755		}
1756	}
1757	if (IS_ERR(mnt_root)) {
1758		root = ERR_CAST(mnt_root);
1759		kfree(subvol_name);
1760		goto out;
1761	}
1762
1763	/* mount_subvol() will free subvol_name and mnt_root */
1764	root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
1765
1766out:
1767	return root;
1768}
1769
1770static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1771				     u32 new_pool_size, u32 old_pool_size)
1772{
1773	if (new_pool_size == old_pool_size)
1774		return;
1775
1776	fs_info->thread_pool_size = new_pool_size;
1777
1778	btrfs_info(fs_info, "resize thread pool %d -> %d",
1779	       old_pool_size, new_pool_size);
1780
1781	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1782	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1783	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1784	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
1785	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
1786	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
1787				new_pool_size);
1788	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1789	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1790	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1791	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
1792	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
1793				new_pool_size);
1794}
1795
1796static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1797				       unsigned long old_opts, int flags)
1798{
1799	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1800	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1801	     (flags & SB_RDONLY))) {
1802		/* wait for any defraggers to finish */
1803		wait_event(fs_info->transaction_wait,
1804			   (atomic_read(&fs_info->defrag_running) == 0));
1805		if (flags & SB_RDONLY)
1806			sync_filesystem(fs_info->sb);
1807	}
1808}
1809
1810static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1811					 unsigned long old_opts)
1812{
1813	/*
1814	 * We need to clean up all defraggable inodes if autodefrag has been
1815	 * turned off or the filesystem is now read only.
1816	 */
1817	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1818	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1819		btrfs_cleanup_defrag_inodes(fs_info);
1820	}
1821
1822	/* If we toggled discard async */
1823	if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1824	    btrfs_test_opt(fs_info, DISCARD_ASYNC))
1825		btrfs_discard_resume(fs_info);
1826	else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1827		 !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1828		btrfs_discard_cleanup(fs_info);
1829}
1830
1831static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1832{
1833	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1834	struct btrfs_root *root = fs_info->tree_root;
1835	unsigned old_flags = sb->s_flags;
1836	unsigned long old_opts = fs_info->mount_opt;
1837	unsigned long old_compress_type = fs_info->compress_type;
1838	u64 old_max_inline = fs_info->max_inline;
1839	u32 old_thread_pool_size = fs_info->thread_pool_size;
1840	u32 old_metadata_ratio = fs_info->metadata_ratio;
1841	int ret;
1842
1843	sync_filesystem(sb);
1844	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1845
1846	if (data) {
1847		void *new_sec_opts = NULL;
1848
1849		ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
1850		if (!ret)
1851			ret = security_sb_remount(sb, new_sec_opts);
1852		security_free_mnt_opts(&new_sec_opts);
1853		if (ret)
1854			goto restore;
1855	}
1856
1857	ret = btrfs_parse_options(fs_info, data, *flags);
1858	if (ret)
1859		goto restore;
1860
1861	btrfs_remount_begin(fs_info, old_opts, *flags);
1862	btrfs_resize_thread_pool(fs_info,
1863		fs_info->thread_pool_size, old_thread_pool_size);
1864
1865	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1866		goto out;
1867
1868	if (*flags & SB_RDONLY) {
1869		/*
1870		 * this also happens on 'umount -rf' or on shutdown, when
1871		 * the filesystem is busy.
1872		 */
1873		cancel_work_sync(&fs_info->async_reclaim_work);
1874
1875		btrfs_discard_cleanup(fs_info);
1876
1877		/* wait for the uuid_scan task to finish */
1878		down(&fs_info->uuid_tree_rescan_sem);
1879		/* avoid complaints from lockdep et al. */
1880		up(&fs_info->uuid_tree_rescan_sem);
1881
1882		sb->s_flags |= SB_RDONLY;
1883
1884		/*
1885		 * Setting SB_RDONLY will put the cleaner thread to
1886		 * sleep at the next loop if it's already active.
1887		 * If it's already asleep, we'll leave unused block
1888		 * groups on disk until we're mounted read-write again
1889		 * unless we clean them up here.
1890		 */
1891		btrfs_delete_unused_bgs(fs_info);
1892
1893		btrfs_dev_replace_suspend_for_unmount(fs_info);
1894		btrfs_scrub_cancel(fs_info);
1895		btrfs_pause_balance(fs_info);
1896
1897		ret = btrfs_commit_super(fs_info);
1898		if (ret)
1899			goto restore;
1900	} else {
1901		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1902			btrfs_err(fs_info,
1903				"Remounting read-write after error is not allowed");
1904			ret = -EINVAL;
1905			goto restore;
1906		}
1907		if (fs_info->fs_devices->rw_devices == 0) {
1908			ret = -EACCES;
1909			goto restore;
1910		}
1911
1912		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1913			btrfs_warn(fs_info,
1914		"too many missing devices, writable remount is not allowed");
1915			ret = -EACCES;
1916			goto restore;
1917		}
1918
1919		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1920			btrfs_warn(fs_info,
1921		"mount required to replay tree-log, cannot remount read-write");
1922			ret = -EINVAL;
1923			goto restore;
1924		}
1925
1926		ret = btrfs_cleanup_fs_roots(fs_info);
1927		if (ret)
1928			goto restore;
1929
1930		/* recover relocation */
1931		mutex_lock(&fs_info->cleaner_mutex);
1932		ret = btrfs_recover_relocation(root);
1933		mutex_unlock(&fs_info->cleaner_mutex);
1934		if (ret)
1935			goto restore;
1936
1937		ret = btrfs_resume_balance_async(fs_info);
1938		if (ret)
1939			goto restore;
1940
1941		ret = btrfs_resume_dev_replace_async(fs_info);
1942		if (ret) {
1943			btrfs_warn(fs_info, "failed to resume dev_replace");
1944			goto restore;
1945		}
1946
1947		btrfs_qgroup_rescan_resume(fs_info);
1948
1949		if (!fs_info->uuid_root) {
1950			btrfs_info(fs_info, "creating UUID tree");
1951			ret = btrfs_create_uuid_tree(fs_info);
1952			if (ret) {
1953				btrfs_warn(fs_info,
1954					   "failed to create the UUID tree %d",
1955					   ret);
1956				goto restore;
1957			}
1958		}
1959		sb->s_flags &= ~SB_RDONLY;
1960
1961		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1962	}
1963out:
1964	/*
1965	 * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
1966	 * since the absence of the flag means it can be toggled off by remount.
1967	 */
1968	*flags |= SB_I_VERSION;
1969
1970	wake_up_process(fs_info->transaction_kthread);
1971	btrfs_remount_cleanup(fs_info, old_opts);
1972	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1973
1974	return 0;
1975
1976restore:
1977	/* We've hit an error - don't reset SB_RDONLY */
1978	if (sb_rdonly(sb))
1979		old_flags |= SB_RDONLY;
1980	sb->s_flags = old_flags;
1981	fs_info->mount_opt = old_opts;
1982	fs_info->compress_type = old_compress_type;
1983	fs_info->max_inline = old_max_inline;
1984	btrfs_resize_thread_pool(fs_info,
1985		old_thread_pool_size, fs_info->thread_pool_size);
1986	fs_info->metadata_ratio = old_metadata_ratio;
1987	btrfs_remount_cleanup(fs_info, old_opts);
1988	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1989
1990	return ret;
1991}
1992
1993/* Used to sort the devices by max_avail (descending sort) */
1994static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
1995				       const void *dev_info2)
1996{
1997	if (((struct btrfs_device_info *)dev_info1)->max_avail >
1998	    ((struct btrfs_device_info *)dev_info2)->max_avail)
1999		return -1;
2000	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
2001		 ((struct btrfs_device_info *)dev_info2)->max_avail)
2002		return 1;
2003	else
2004		return 0;
2005}
2006
2007/*
2008 * Sort the devices by max_avail, which holds the maximum free extent size
2009 * of each device (descending sort).
2010 */
2011static inline void btrfs_descending_sort_devices(
2012					struct btrfs_device_info *devices,
2013					size_t nr_devices)
2014{
2015	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
2016	     btrfs_cmp_device_free_bytes, NULL);
2017}
2018
2019/*
2020 * Helper to calculate the free space on the devices that can be used to
2021 * store file data.
2022 */
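/*
 * Worked example with made-up sizes: two devices with 100GiB and 50GiB
 * unallocated and a RAID1 data profile (num_stripes = 2) yield roughly
 * 2 * 50GiB = 100GiB of raw space for new data chunks here; btrfs_statfs()
 * then divides by the profile factor of 2 and reports about 50GiB of usable
 * space for file data.  The loop below approximates the chunk allocator,
 * which stripes chunks across the devices with the most free space.
 */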
2023static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
2024					      u64 *free_bytes)
2025{
2026	struct btrfs_device_info *devices_info;
2027	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2028	struct btrfs_device *device;
2029	u64 type;
2030	u64 avail_space;
2031	u64 min_stripe_size;
2032	int num_stripes = 1;
2033	int i = 0, nr_devices;
2034	const struct btrfs_raid_attr *rattr;
2035
2036	/*
2037	 * We aren't under the device list lock, so this is racy-ish, but good
2038	 * enough for our purposes.
2039	 */
2040	nr_devices = fs_info->fs_devices->open_devices;
2041	if (!nr_devices) {
2042		smp_mb();
2043		nr_devices = fs_info->fs_devices->open_devices;
2044		ASSERT(nr_devices);
2045		if (!nr_devices) {
2046			*free_bytes = 0;
2047			return 0;
2048		}
2049	}
2050
2051	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
2052			       GFP_KERNEL);
2053	if (!devices_info)
2054		return -ENOMEM;
2055
2056	/* calc min stripe number for data space allocation */
2057	type = btrfs_data_alloc_profile(fs_info);
2058	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
2059
2060	if (type & BTRFS_BLOCK_GROUP_RAID0)
2061		num_stripes = nr_devices;
2062	else if (type & BTRFS_BLOCK_GROUP_RAID1)
2063		num_stripes = 2;
2064	else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
2065		num_stripes = 3;
2066	else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
2067		num_stripes = 4;
2068	else if (type & BTRFS_BLOCK_GROUP_RAID10)
2069		num_stripes = 4;
2070
2071	/* Adjust for more than 1 stripe per device */
2072	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
2073
2074	rcu_read_lock();
2075	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2076		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2077						&device->dev_state) ||
2078		    !device->bdev ||
2079		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2080			continue;
2081
2082		if (i >= nr_devices)
2083			break;
2084
2085		avail_space = device->total_bytes - device->bytes_used;
2086
2087		/* align with stripe_len */
2088		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
2089
2090		/*
2091		 * In order to avoid overwriting the superblock on the drive,
2092		 * btrfs starts at an offset of at least 1MB when doing chunk
2093		 * allocation.
2094		 *
2095		 * This ensures we have at least min_stripe_size free space
2096		 * after excluding 1MB.
2097		 */
2098		if (avail_space <= SZ_1M + min_stripe_size)
2099			continue;
2100
2101		avail_space -= SZ_1M;
2102
2103		devices_info[i].dev = device;
2104		devices_info[i].max_avail = avail_space;
2105
2106		i++;
2107	}
2108	rcu_read_unlock();
2109
2110	nr_devices = i;
2111
2112	btrfs_descending_sort_devices(devices_info, nr_devices);
2113
2114	i = nr_devices - 1;
2115	avail_space = 0;
2116	while (nr_devices >= rattr->devs_min) {
2117		num_stripes = min(num_stripes, nr_devices);
2118
2119		if (devices_info[i].max_avail >= min_stripe_size) {
2120			int j;
2121			u64 alloc_size;
2122
2123			avail_space += devices_info[i].max_avail * num_stripes;
2124			alloc_size = devices_info[i].max_avail;
2125			for (j = i + 1 - num_stripes; j <= i; j++)
2126				devices_info[j].max_avail -= alloc_size;
2127		}
2128		i--;
2129		nr_devices--;
2130	}
2131
2132	kfree(devices_info);
2133	*free_bytes = avail_space;
2134	return 0;
2135}
2136
2137/*
2138 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2139 *
2140 * If there's a redundant raid level at DATA block groups, use the respective
2141 * multiplier to scale the sizes.
2142 *
2143 * Unused device space usage is based on simulating the chunk allocator
2144 * algorithm that respects the device sizes and order of allocations.  This is
2145 * a close approximation of the actual use but there are other factors that may
2146 * change the result (like a new metadata chunk).
2147 *
2148 * If metadata is exhausted, f_bavail will be 0.
2149 */
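/*
 * Example with assumed numbers: on a two-disk RAID1 data filesystem the
 * factor below becomes 2, so 2TiB of raw device space shows up as 1TiB in
 * f_blocks and the used/free data numbers are scaled the same way; a
 * single-disk filesystem keeps factor 1 and reports raw sizes.
 */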
2150static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2151{
2152	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2153	struct btrfs_super_block *disk_super = fs_info->super_copy;
2154	struct btrfs_space_info *found;
2155	u64 total_used = 0;
2156	u64 total_free_data = 0;
2157	u64 total_free_meta = 0;
2158	int bits = dentry->d_sb->s_blocksize_bits;
2159	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
2160	unsigned factor = 1;
2161	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2162	int ret;
2163	u64 thresh = 0;
2164	int mixed = 0;
2165
2166	rcu_read_lock();
2167	list_for_each_entry_rcu(found, &fs_info->space_info, list) {
2168		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2169			int i;
2170
2171			total_free_data += found->disk_total - found->disk_used;
2172			total_free_data -=
2173				btrfs_account_ro_block_groups_free_space(found);
2174
2175			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2176				if (!list_empty(&found->block_groups[i]))
2177					factor = btrfs_bg_type_to_factor(
2178						btrfs_raid_array[i].bg_flag);
2179			}
2180		}
2181
2182		/*
2183		 * Metadata in mixed block group profiles is accounted as data
2184		 */
2185		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2186			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2187				mixed = 1;
2188			else
2189				total_free_meta += found->disk_total -
2190					found->disk_used;
2191		}
2192
2193		total_used += found->disk_used;
2194	}
2195
2196	rcu_read_unlock();
2197
2198	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2199	buf->f_blocks >>= bits;
2200	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2201
2202	/* Account global block reserve as used, it's in logical size already */
2203	spin_lock(&block_rsv->lock);
2204	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
2205	if (buf->f_bfree >= block_rsv->size >> bits)
2206		buf->f_bfree -= block_rsv->size >> bits;
2207	else
2208		buf->f_bfree = 0;
2209	spin_unlock(&block_rsv->lock);
2210
2211	buf->f_bavail = div_u64(total_free_data, factor);
2212	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2213	if (ret)
2214		return ret;
2215	buf->f_bavail += div_u64(total_free_data, factor);
2216	buf->f_bavail = buf->f_bavail >> bits;
2217
2218	/*
2219	 * We calculate the remaining metadata space minus global reserve. If
2220	 * this is (supposedly) smaller than zero, there's no space. But this
2221	 * does not hold in practice, the exhausted state happens while there's still
2222	 * some positive delta. So we apply some guesswork and compare the
2223	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
2224	 *
2225	 * We probably cannot calculate the exact threshold value because this
2226	 * depends on the internal reservations requested by various
2227	 * operations, so some operations that consume a little metadata will
2228	 * succeed even if the Avail is zero. But this is better than the other
2229	 * way around.
2230	 */
2231	thresh = SZ_4M;
2232
2233	/*
2234	 * We only want to claim there's no available space if we can no longer
2235	 * allocate chunks for our metadata profile and our global reserve will
2236	 * not fit in the free metadata space.  If we aren't ->full then we
2237	 * still can allocate chunks and thus are fine using the currently
2238	 * calculated f_bavail.
2239	 */
2240	if (!mixed && block_rsv->space_info->full &&
2241	    total_free_meta - thresh < block_rsv->size)
2242		buf->f_bavail = 0;
2243
2244	buf->f_type = BTRFS_SUPER_MAGIC;
2245	buf->f_bsize = dentry->d_sb->s_blocksize;
2246	buf->f_namelen = BTRFS_NAME_LEN;
2247
2248	/* We treat it as constant endianness (it doesn't matter _which_) because
2249	 * we want the fsid to come out the same whether mounted on a big-endian
2250	 * or little-endian host. */
2251	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2252	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2253	/* Mask in the root object ID too, to disambiguate subvols */
2254	buf->f_fsid.val[0] ^=
2255		BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
2256	buf->f_fsid.val[1] ^=
2257		BTRFS_I(d_inode(dentry))->root->root_key.objectid;
2258
2259	return 0;
2260}
2261
2262static void btrfs_kill_super(struct super_block *sb)
2263{
2264	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2265	kill_anon_super(sb);
2266	btrfs_free_fs_info(fs_info);
2267}
2268
2269static struct file_system_type btrfs_fs_type = {
2270	.owner		= THIS_MODULE,
2271	.name		= "btrfs",
2272	.mount		= btrfs_mount,
2273	.kill_sb	= btrfs_kill_super,
2274	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2275};
2276
2277static struct file_system_type btrfs_root_fs_type = {
2278	.owner		= THIS_MODULE,
2279	.name		= "btrfs",
2280	.mount		= btrfs_mount_root,
2281	.kill_sb	= btrfs_kill_super,
2282	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2283};
2284
2285MODULE_ALIAS_FS("btrfs");
2286
2287static int btrfs_control_open(struct inode *inode, struct file *file)
2288{
2289	/*
2290	 * The control file's private_data is used to hold the
2291	 * transaction when it is started and is used to keep
2292	 * track of whether a transaction is already in progress.
2293	 */
2294	file->private_data = NULL;
2295	return 0;
2296}
2297
2298/*
2299 * Used by /dev/btrfs-control for device ioctls.
2300 */
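/*
 * For example, "btrfs device scan" registers devices through
 * BTRFS_IOC_SCAN_DEV on /dev/btrfs-control, and udev rules typically use
 * BTRFS_IOC_DEVICES_READY to check whether all member devices of a
 * multi-device filesystem have appeared before attempting to mount it.
 */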
2301static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2302				unsigned long arg)
2303{
2304	struct btrfs_ioctl_vol_args *vol;
2305	struct btrfs_device *device = NULL;
2306	int ret = -ENOTTY;
2307
2308	if (!capable(CAP_SYS_ADMIN))
2309		return -EPERM;
2310
2311	vol = memdup_user((void __user *)arg, sizeof(*vol));
2312	if (IS_ERR(vol))
2313		return PTR_ERR(vol);
2314	vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2315
2316	switch (cmd) {
2317	case BTRFS_IOC_SCAN_DEV:
2318		mutex_lock(&uuid_mutex);
2319		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2320					       &btrfs_root_fs_type);
2321		ret = PTR_ERR_OR_ZERO(device);
2322		mutex_unlock(&uuid_mutex);
2323		break;
2324	case BTRFS_IOC_FORGET_DEV:
2325		ret = btrfs_forget_devices(vol->name);
2326		break;
2327	case BTRFS_IOC_DEVICES_READY:
2328		mutex_lock(&uuid_mutex);
2329		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2330					       &btrfs_root_fs_type);
2331		if (IS_ERR(device)) {
2332			mutex_unlock(&uuid_mutex);
2333			ret = PTR_ERR(device);
2334			break;
2335		}
2336		ret = !(device->fs_devices->num_devices ==
2337			device->fs_devices->total_devices);
2338		mutex_unlock(&uuid_mutex);
2339		break;
2340	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2341		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2342		break;
2343	}
2344
2345	kfree(vol);
2346	return ret;
2347}
2348
2349static int btrfs_freeze(struct super_block *sb)
2350{
2351	struct btrfs_trans_handle *trans;
2352	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2353	struct btrfs_root *root = fs_info->tree_root;
2354
2355	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2356	/*
2357	 * We don't need a barrier here, we'll wait for any transaction that
2358	 * could be in progress on other threads (and do delayed iputs that
2359	 * we want to avoid on a frozen filesystem), or do the commit
2360	 * ourselves.
2361	 */
2362	trans = btrfs_attach_transaction_barrier(root);
2363	if (IS_ERR(trans)) {
2364		/* no transaction, don't bother */
2365		if (PTR_ERR(trans) == -ENOENT)
2366			return 0;
2367		return PTR_ERR(trans);
2368	}
2369	return btrfs_commit_transaction(trans);
2370}
2371
2372static int btrfs_unfreeze(struct super_block *sb)
2373{
2374	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2375
2376	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2377	return 0;
2378}
2379
2380static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2381{
2382	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2383	struct btrfs_device *dev, *first_dev = NULL;
2384
2385	/*
2386	 * Lightweight locking of the devices. We should not need
2387	 * device_list_mutex here as we only read the device data and the list
2388	 * is protected by RCU.  Even if a device is deleted during the list
2389	 * traversals, we'll get valid data, the freeing callback will wait at
2390	 * least until the rcu_read_unlock.
2391	 */
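	/*
	 * The device with the lowest devid is picked so that the name reported
	 * in /proc/mounts is deterministic and does not depend on the order in
	 * which the member devices were scanned.
	 */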
2392	rcu_read_lock();
2393	list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
2394		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2395			continue;
2396		if (!dev->name)
2397			continue;
2398		if (!first_dev || dev->devid < first_dev->devid)
2399			first_dev = dev;
2400	}
2401
2402	if (first_dev)
2403		seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
2404	else
2405		WARN_ON(1);
2406	rcu_read_unlock();
2407	return 0;
2408}
2409
2410static const struct super_operations btrfs_super_ops = {
2411	.drop_inode	= btrfs_drop_inode,
2412	.evict_inode	= btrfs_evict_inode,
2413	.put_super	= btrfs_put_super,
2414	.sync_fs	= btrfs_sync_fs,
2415	.show_options	= btrfs_show_options,
2416	.show_devname	= btrfs_show_devname,
2417	.alloc_inode	= btrfs_alloc_inode,
2418	.destroy_inode	= btrfs_destroy_inode,
2419	.free_inode	= btrfs_free_inode,
2420	.statfs		= btrfs_statfs,
2421	.remount_fs	= btrfs_remount,
2422	.freeze_fs	= btrfs_freeze,
2423	.unfreeze_fs	= btrfs_unfreeze,
2424};
2425
2426static const struct file_operations btrfs_ctl_fops = {
2427	.open = btrfs_control_open,
2428	.unlocked_ioctl	 = btrfs_control_ioctl,
2429	.compat_ioctl = compat_ptr_ioctl,
2430	.owner	 = THIS_MODULE,
2431	.llseek = noop_llseek,
2432};
2433
2434static struct miscdevice btrfs_misc = {
2435	.minor		= BTRFS_MINOR,
2436	.name		= "btrfs-control",
2437	.fops		= &btrfs_ctl_fops
2438};
2439
2440MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2441MODULE_ALIAS("devname:btrfs-control");
2442
2443static int __init btrfs_interface_init(void)
2444{
2445	return misc_register(&btrfs_misc);
2446}
2447
2448static __cold void btrfs_interface_exit(void)
2449{
2450	misc_deregister(&btrfs_misc);
2451}
2452
2453static void __init btrfs_print_mod_info(void)
2454{
2455	static const char options[] = ""
2456#ifdef CONFIG_BTRFS_DEBUG
2457			", debug=on"
2458#endif
2459#ifdef CONFIG_BTRFS_ASSERT
2460			", assert=on"
2461#endif
2462#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2463			", integrity-checker=on"
2464#endif
2465#ifdef CONFIG_BTRFS_FS_REF_VERIFY
2466			", ref-verify=on"
2467#endif
2468			;
2469	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
2470}
2471
2472static int __init init_btrfs_fs(void)
2473{
2474	int err;
2475
2476	btrfs_props_init();
2477
2478	err = btrfs_init_sysfs();
2479	if (err)
2480		return err;
2481
2482	btrfs_init_compress();
2483
2484	err = btrfs_init_cachep();
2485	if (err)
2486		goto free_compress;
2487
2488	err = extent_io_init();
2489	if (err)
2490		goto free_cachep;
2491
2492	err = extent_state_cache_init();
2493	if (err)
2494		goto free_extent_io;
2495
2496	err = extent_map_init();
2497	if (err)
2498		goto free_extent_state_cache;
2499
2500	err = ordered_data_init();
2501	if (err)
2502		goto free_extent_map;
2503
2504	err = btrfs_delayed_inode_init();
2505	if (err)
2506		goto free_ordered_data;
2507
2508	err = btrfs_auto_defrag_init();
2509	if (err)
2510		goto free_delayed_inode;
2511
2512	err = btrfs_delayed_ref_init();
2513	if (err)
2514		goto free_auto_defrag;
2515
2516	err = btrfs_prelim_ref_init();
2517	if (err)
2518		goto free_delayed_ref;
2519
2520	err = btrfs_end_io_wq_init();
2521	if (err)
2522		goto free_prelim_ref;
2523
2524	err = btrfs_interface_init();
2525	if (err)
2526		goto free_end_io_wq;
2527
2528	btrfs_init_lockdep();
2529
2530	btrfs_print_mod_info();
2531
2532	err = btrfs_run_sanity_tests();
2533	if (err)
2534		goto unregister_ioctl;
2535
2536	err = register_filesystem(&btrfs_fs_type);
2537	if (err)
2538		goto unregister_ioctl;
2539
2540	return 0;
2541
2542unregister_ioctl:
2543	btrfs_interface_exit();
2544free_end_io_wq:
2545	btrfs_end_io_wq_exit();
2546free_prelim_ref:
2547	btrfs_prelim_ref_exit();
2548free_delayed_ref:
2549	btrfs_delayed_ref_exit();
2550free_auto_defrag:
2551	btrfs_auto_defrag_exit();
2552free_delayed_inode:
2553	btrfs_delayed_inode_exit();
2554free_ordered_data:
2555	ordered_data_exit();
2556free_extent_map:
2557	extent_map_exit();
2558free_extent_state_cache:
2559	extent_state_cache_exit();
2560free_extent_io:
2561	extent_io_exit();
2562free_cachep:
2563	btrfs_destroy_cachep();
2564free_compress:
2565	btrfs_exit_compress();
2566	btrfs_exit_sysfs();
2567
2568	return err;
2569}
2570
2571static void __exit exit_btrfs_fs(void)
2572{
2573	btrfs_destroy_cachep();
2574	btrfs_delayed_ref_exit();
2575	btrfs_auto_defrag_exit();
2576	btrfs_delayed_inode_exit();
2577	btrfs_prelim_ref_exit();
2578	ordered_data_exit();
2579	extent_map_exit();
2580	extent_state_cache_exit();
2581	extent_io_exit();
2582	btrfs_interface_exit();
2583	btrfs_end_io_wq_exit();
2584	unregister_filesystem(&btrfs_fs_type);
2585	btrfs_exit_sysfs();
2586	btrfs_cleanup_fs_uuids();
2587	btrfs_exit_compress();
2588}
2589
2590late_initcall(init_btrfs_fs);
2591module_exit(exit_btrfs_fs)
2592
2593MODULE_LICENSE("GPL");
2594MODULE_SOFTDEP("pre: crc32c");
2595MODULE_SOFTDEP("pre: xxhash64");
2596MODULE_SOFTDEP("pre: sha256");
2597MODULE_SOFTDEP("pre: blake2b-256");