   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/module.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/highmem.h>
  11#include <linux/time.h>
  12#include <linux/init.h>
  13#include <linux/seq_file.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/mount.h>
  17#include <linux/writeback.h>
  18#include <linux/statfs.h>
  19#include <linux/compat.h>
  20#include <linux/parser.h>
  21#include <linux/ctype.h>
  22#include <linux/namei.h>
  23#include <linux/miscdevice.h>
  24#include <linux/magic.h>
  25#include <linux/slab.h>
  26#include <linux/ratelimit.h>
  27#include <linux/crc32c.h>
  28#include <linux/btrfs.h>
  29#include <linux/security.h>
  30#include <linux/fs_parser.h>
  31#include "messages.h"
  32#include "delayed-inode.h"
  33#include "ctree.h"
  34#include "disk-io.h"
  35#include "transaction.h"
  36#include "btrfs_inode.h"
  37#include "print-tree.h"
  38#include "props.h"
  39#include "xattr.h"
  40#include "bio.h"
  41#include "export.h"
  42#include "compression.h"
  43#include "rcu-string.h"
  44#include "dev-replace.h"
  45#include "free-space-cache.h"
  46#include "backref.h"
  47#include "space-info.h"
  48#include "sysfs.h"
  49#include "zoned.h"
  50#include "tests/btrfs-tests.h"
  51#include "block-group.h"
  52#include "discard.h"
  53#include "qgroup.h"
  54#include "raid56.h"
  55#include "fs.h"
  56#include "accessors.h"
  57#include "defrag.h"
  58#include "dir-item.h"
  59#include "ioctl.h"
  60#include "scrub.h"
  61#include "verity.h"
  62#include "super.h"
  63#include "extent-tree.h"
  64#define CREATE_TRACE_POINTS
  65#include <trace/events/btrfs.h>
  66
  67static const struct super_operations btrfs_super_ops;
  68static struct file_system_type btrfs_fs_type;
  69
  70static void btrfs_put_super(struct super_block *sb)
  71{
  72	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
  73
  74	btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
  75	close_ctree(fs_info);
  76}
  77
  78/* Store the mount options related information. */
  79struct btrfs_fs_context {
  80	char *subvol_name;
  81	u64 subvol_objectid;
  82	u64 max_inline;
  83	u32 commit_interval;
  84	u32 metadata_ratio;
  85	u32 thread_pool_size;
  86	unsigned long mount_opt;
  87	unsigned long compress_type:4;
  88	unsigned int compress_level;
  89	refcount_t refs;
  90};
  91
  92enum {
  93	Opt_acl,
  94	Opt_clear_cache,
  95	Opt_commit_interval,
  96	Opt_compress,
  97	Opt_compress_force,
  98	Opt_compress_force_type,
  99	Opt_compress_type,
 100	Opt_degraded,
 101	Opt_device,
 102	Opt_fatal_errors,
 103	Opt_flushoncommit,
 104	Opt_max_inline,
 105	Opt_barrier,
 106	Opt_datacow,
 107	Opt_datasum,
 108	Opt_defrag,
 109	Opt_discard,
 110	Opt_discard_mode,
 111	Opt_ratio,
 112	Opt_rescan_uuid_tree,
 113	Opt_skip_balance,
 114	Opt_space_cache,
 115	Opt_space_cache_version,
 116	Opt_ssd,
 117	Opt_ssd_spread,
 118	Opt_subvol,
 119	Opt_subvol_empty,
 120	Opt_subvolid,
 121	Opt_thread_pool,
 122	Opt_treelog,
 123	Opt_user_subvol_rm_allowed,
 124
 125	/* Rescue options */
 126	Opt_rescue,
 127	Opt_usebackuproot,
 128	Opt_nologreplay,
 129	Opt_ignorebadroots,
 130	Opt_ignoredatacsums,
 131	Opt_rescue_all,
 132
 133	/* Debugging options */
 134	Opt_enospc_debug,
 135#ifdef CONFIG_BTRFS_DEBUG
 136	Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
 137#endif
 138#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 139	Opt_ref_verify,
 140#endif
 141	Opt_err,
 142};
 143
 144enum {
 145	Opt_fatal_errors_panic,
 146	Opt_fatal_errors_bug,
 147};
 148
 149static const struct constant_table btrfs_parameter_fatal_errors[] = {
 150	{ "panic", Opt_fatal_errors_panic },
 151	{ "bug", Opt_fatal_errors_bug },
 152	{}
 153};
 154
 155enum {
 156	Opt_discard_sync,
 157	Opt_discard_async,
 158};
 159
 160static const struct constant_table btrfs_parameter_discard[] = {
 161	{ "sync", Opt_discard_sync },
 162	{ "async", Opt_discard_async },
 163	{}
 164};
 165
 166enum {
 167	Opt_space_cache_v1,
 168	Opt_space_cache_v2,
 169};
 170
 171static const struct constant_table btrfs_parameter_space_cache[] = {
 172	{ "v1", Opt_space_cache_v1 },
 173	{ "v2", Opt_space_cache_v2 },
 174	{}
 175};
 176
 177enum {
 178	Opt_rescue_usebackuproot,
 179	Opt_rescue_nologreplay,
 180	Opt_rescue_ignorebadroots,
 181	Opt_rescue_ignoredatacsums,
 182	Opt_rescue_parameter_all,
 183};
 184
 185static const struct constant_table btrfs_parameter_rescue[] = {
 186	{ "usebackuproot", Opt_rescue_usebackuproot },
 187	{ "nologreplay", Opt_rescue_nologreplay },
 188	{ "ignorebadroots", Opt_rescue_ignorebadroots },
 189	{ "ibadroots", Opt_rescue_ignorebadroots },
 190	{ "ignoredatacsums", Opt_rescue_ignoredatacsums },
 191	{ "idatacsums", Opt_rescue_ignoredatacsums },
 192	{ "all", Opt_rescue_parameter_all },
 193	{}
 194};
 195
 196#ifdef CONFIG_BTRFS_DEBUG
 197enum {
 198	Opt_fragment_parameter_data,
 199	Opt_fragment_parameter_metadata,
 200	Opt_fragment_parameter_all,
 201};
 202
 203static const struct constant_table btrfs_parameter_fragment[] = {
 204	{ "data", Opt_fragment_parameter_data },
 205	{ "metadata", Opt_fragment_parameter_metadata },
 206	{ "all", Opt_fragment_parameter_all },
 207	{}
 208};
 209#endif
 210
 211static const struct fs_parameter_spec btrfs_fs_parameters[] = {
 212	fsparam_flag_no("acl", Opt_acl),
 213	fsparam_flag_no("autodefrag", Opt_defrag),
 214	fsparam_flag_no("barrier", Opt_barrier),
 215	fsparam_flag("clear_cache", Opt_clear_cache),
 216	fsparam_u32("commit", Opt_commit_interval),
 217	fsparam_flag("compress", Opt_compress),
 218	fsparam_string("compress", Opt_compress_type),
 219	fsparam_flag("compress-force", Opt_compress_force),
 220	fsparam_string("compress-force", Opt_compress_force_type),
 221	fsparam_flag_no("datacow", Opt_datacow),
 222	fsparam_flag_no("datasum", Opt_datasum),
 223	fsparam_flag("degraded", Opt_degraded),
 224	fsparam_string("device", Opt_device),
 225	fsparam_flag_no("discard", Opt_discard),
 226	fsparam_enum("discard", Opt_discard_mode, btrfs_parameter_discard),
 227	fsparam_enum("fatal_errors", Opt_fatal_errors, btrfs_parameter_fatal_errors),
 228	fsparam_flag_no("flushoncommit", Opt_flushoncommit),
 229	fsparam_string("max_inline", Opt_max_inline),
 230	fsparam_u32("metadata_ratio", Opt_ratio),
 231	fsparam_flag("rescan_uuid_tree", Opt_rescan_uuid_tree),
 232	fsparam_flag("skip_balance", Opt_skip_balance),
 233	fsparam_flag_no("space_cache", Opt_space_cache),
 234	fsparam_enum("space_cache", Opt_space_cache_version, btrfs_parameter_space_cache),
 235	fsparam_flag_no("ssd", Opt_ssd),
 236	fsparam_flag_no("ssd_spread", Opt_ssd_spread),
 237	fsparam_string("subvol", Opt_subvol),
 238	fsparam_flag("subvol=", Opt_subvol_empty),
 239	fsparam_u64("subvolid", Opt_subvolid),
 240	fsparam_u32("thread_pool", Opt_thread_pool),
 241	fsparam_flag_no("treelog", Opt_treelog),
 242	fsparam_flag("user_subvol_rm_allowed", Opt_user_subvol_rm_allowed),
 243
 244	/* Rescue options. */
 245	fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue),
 246	/* Deprecated, with alias rescue=nologreplay */
 247	__fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL),
 248	/* Deprecated, with alias rescue=usebackuproot */
 249	__fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL),
 250
 251	/* Debugging options. */
 252	fsparam_flag_no("enospc_debug", Opt_enospc_debug),
 253#ifdef CONFIG_BTRFS_DEBUG
 254	fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment),
 255#endif
 256#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 257	fsparam_flag("ref_verify", Opt_ref_verify),
 258#endif
 259	{}
 260};
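/*
 * Illustrative option strings accepted by the table above, assuming the
 * usual mount(8) syntax (not an exhaustive list):
 *
 *   mount -o compress=zstd:3,discard=async,space_cache=v2 /dev/sdb /mnt
 *   mount -o ro,rescue=nologreplay /dev/sdb /mnt
 *   mount -o subvol=home,thread_pool=16 /dev/sdb /mnt
 *
 * "compress" and "discard" are deliberately listed twice: once as a plain
 * flag and once taking a value, and fs_parse() matches whichever form the
 * caller supplied.
 */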
 261
 262/* No support for restricting writes to btrfs devices yet... */
 263static inline blk_mode_t btrfs_open_mode(struct fs_context *fc)
 264{
 265	return sb_open_mode(fc->sb_flags) & ~BLK_OPEN_RESTRICT_WRITES;
 266}
 267
 268static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 269{
 270	struct btrfs_fs_context *ctx = fc->fs_private;
 271	struct fs_parse_result result;
 272	int opt;
 273
 274	opt = fs_parse(fc, btrfs_fs_parameters, param, &result);
 275	if (opt < 0)
 276		return opt;
 277
 278	switch (opt) {
 279	case Opt_degraded:
 280		btrfs_set_opt(ctx->mount_opt, DEGRADED);
 281		break;
 282	case Opt_subvol_empty:
 283		/*
 284		 * This exists because we used to allow it on accident, so we're
 285		 * keeping it to maintain ABI.  See 37becec95ac3 ("Btrfs: allow
 286		 * empty subvol= again").
 287		 */
 288		break;
 289	case Opt_subvol:
 290		kfree(ctx->subvol_name);
 291		ctx->subvol_name = kstrdup(param->string, GFP_KERNEL);
 292		if (!ctx->subvol_name)
 293			return -ENOMEM;
 294		break;
 295	case Opt_subvolid:
 296		ctx->subvol_objectid = result.uint_64;
 297
 298		/* subvolid=0 means give me the original fs_tree. */
 299		if (!ctx->subvol_objectid)
 300			ctx->subvol_objectid = BTRFS_FS_TREE_OBJECTID;
 301		break;
 302	case Opt_device: {
 303		struct btrfs_device *device;
 304		blk_mode_t mode = btrfs_open_mode(fc);
 305
 306		mutex_lock(&uuid_mutex);
 307		device = btrfs_scan_one_device(param->string, mode, false);
 308		mutex_unlock(&uuid_mutex);
 309		if (IS_ERR(device))
 310			return PTR_ERR(device);
 311		break;
 312	}
 313	case Opt_datasum:
 314		if (result.negated) {
 315			btrfs_set_opt(ctx->mount_opt, NODATASUM);
 316		} else {
 317			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 318			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
 319		}
 320		break;
 321	case Opt_datacow:
 322		if (result.negated) {
 323			btrfs_clear_opt(ctx->mount_opt, COMPRESS);
 324			btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
 325			btrfs_set_opt(ctx->mount_opt, NODATACOW);
 326			btrfs_set_opt(ctx->mount_opt, NODATASUM);
 327		} else {
 328			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 329		}
 330		break;
 331	case Opt_compress_force:
 332	case Opt_compress_force_type:
 333		btrfs_set_opt(ctx->mount_opt, FORCE_COMPRESS);
 334		fallthrough;
 335	case Opt_compress:
 336	case Opt_compress_type:
 337		if (opt == Opt_compress || opt == Opt_compress_force) {
 338			ctx->compress_type = BTRFS_COMPRESS_ZLIB;
 339			ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
 340			btrfs_set_opt(ctx->mount_opt, COMPRESS);
 341			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 342			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
 343		} else if (strncmp(param->string, "zlib", 4) == 0) {
 344			ctx->compress_type = BTRFS_COMPRESS_ZLIB;
 345			ctx->compress_level =
 346				btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB,
 347							 param->string + 4);
 348			btrfs_set_opt(ctx->mount_opt, COMPRESS);
 349			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 350			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
 351		} else if (strncmp(param->string, "lzo", 3) == 0) {
 352			ctx->compress_type = BTRFS_COMPRESS_LZO;
 353			ctx->compress_level = 0;
 354			btrfs_set_opt(ctx->mount_opt, COMPRESS);
 355			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 356			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
 357		} else if (strncmp(param->string, "zstd", 4) == 0) {
 358			ctx->compress_type = BTRFS_COMPRESS_ZSTD;
 359			ctx->compress_level =
 360				btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD,
 361							 param->string + 4);
 362			btrfs_set_opt(ctx->mount_opt, COMPRESS);
 363			btrfs_clear_opt(ctx->mount_opt, NODATACOW);
 364			btrfs_clear_opt(ctx->mount_opt, NODATASUM);
 365		} else if (strncmp(param->string, "no", 2) == 0) {
 366			ctx->compress_level = 0;
 367			ctx->compress_type = 0;
 368			btrfs_clear_opt(ctx->mount_opt, COMPRESS);
 369			btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS);
 370		} else {
 371			btrfs_err(NULL, "unrecognized compression value %s",
 372				  param->string);
 373			return -EINVAL;
 374		}
 375		break;
 376	case Opt_ssd:
 377		if (result.negated) {
 378			btrfs_set_opt(ctx->mount_opt, NOSSD);
 379			btrfs_clear_opt(ctx->mount_opt, SSD);
 380			btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD);
 381		} else {
 382			btrfs_set_opt(ctx->mount_opt, SSD);
 383			btrfs_clear_opt(ctx->mount_opt, NOSSD);
 384		}
 385		break;
 386	case Opt_ssd_spread:
 387		if (result.negated) {
 388			btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD);
 389		} else {
 390			btrfs_set_opt(ctx->mount_opt, SSD);
 391			btrfs_set_opt(ctx->mount_opt, SSD_SPREAD);
 392			btrfs_clear_opt(ctx->mount_opt, NOSSD);
 393		}
 394		break;
 395	case Opt_barrier:
 396		if (result.negated)
 397			btrfs_set_opt(ctx->mount_opt, NOBARRIER);
 398		else
 399			btrfs_clear_opt(ctx->mount_opt, NOBARRIER);
 400		break;
 401	case Opt_thread_pool:
 402		if (result.uint_32 == 0) {
 403			btrfs_err(NULL, "invalid value 0 for thread_pool");
 404			return -EINVAL;
 405		}
 406		ctx->thread_pool_size = result.uint_32;
 407		break;
 408	case Opt_max_inline:
 409		ctx->max_inline = memparse(param->string, NULL);
 410		break;
 411	case Opt_acl:
 412		if (result.negated) {
 413			fc->sb_flags &= ~SB_POSIXACL;
 414		} else {
 415#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 416			fc->sb_flags |= SB_POSIXACL;
 417#else
 418			btrfs_err(NULL, "support for ACL not compiled in");
 419			return -EINVAL;
 420#endif
 421		}
 422		/*
 423		 * VFS limits the ability to toggle ACL on and off via remount,
 424		 * despite every file system allowing this.  This seems to be
 425		 * an oversight since we all do, but it'll fail if we're
 426		 * remounting.  So don't set the mask here, we'll check it in
 427		 * btrfs_reconfigure and do the toggling ourselves.
 428		 */
 429		if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE)
 430			fc->sb_flags_mask |= SB_POSIXACL;
 431		break;
 432	case Opt_treelog:
 433		if (result.negated)
 434			btrfs_set_opt(ctx->mount_opt, NOTREELOG);
 435		else
 436			btrfs_clear_opt(ctx->mount_opt, NOTREELOG);
 437		break;
 438	case Opt_nologreplay:
 439		btrfs_warn(NULL,
 440		"'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
 441		btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
 442		break;
 443	case Opt_flushoncommit:
 444		if (result.negated)
 445			btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT);
 446		else
 447			btrfs_set_opt(ctx->mount_opt, FLUSHONCOMMIT);
 448		break;
 449	case Opt_ratio:
 450		ctx->metadata_ratio = result.uint_32;
 451		break;
 452	case Opt_discard:
 453		if (result.negated) {
 454			btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC);
 455			btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
 456			btrfs_set_opt(ctx->mount_opt, NODISCARD);
 457		} else {
 458			btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC);
 459			btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
 460		}
 461		break;
 462	case Opt_discard_mode:
 463		switch (result.uint_32) {
 464		case Opt_discard_sync:
 465			btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC);
 466			btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC);
 467			break;
 468		case Opt_discard_async:
 469			btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC);
 470			btrfs_set_opt(ctx->mount_opt, DISCARD_ASYNC);
 471			break;
 472		default:
 473			btrfs_err(NULL, "unrecognized discard mode value %s",
 474				  param->key);
 475			return -EINVAL;
 476		}
 477		btrfs_clear_opt(ctx->mount_opt, NODISCARD);
 478		break;
 479	case Opt_space_cache:
 480		if (result.negated) {
 481			btrfs_set_opt(ctx->mount_opt, NOSPACECACHE);
 482			btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE);
 483			btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
 484		} else {
 485			btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
 486			btrfs_set_opt(ctx->mount_opt, SPACE_CACHE);
 487		}
 488		break;
 489	case Opt_space_cache_version:
 490		switch (result.uint_32) {
 491		case Opt_space_cache_v1:
 492			btrfs_set_opt(ctx->mount_opt, SPACE_CACHE);
 493			btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE);
 494			break;
 495		case Opt_space_cache_v2:
 496			btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE);
 497			btrfs_set_opt(ctx->mount_opt, FREE_SPACE_TREE);
 498			break;
 499		default:
 500			btrfs_err(NULL, "unrecognized space_cache value %s",
 501				  param->key);
 502			return -EINVAL;
 503		}
 504		break;
 505	case Opt_rescan_uuid_tree:
 506		btrfs_set_opt(ctx->mount_opt, RESCAN_UUID_TREE);
 507		break;
 508	case Opt_clear_cache:
 509		btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE);
 510		break;
 511	case Opt_user_subvol_rm_allowed:
 512		btrfs_set_opt(ctx->mount_opt, USER_SUBVOL_RM_ALLOWED);
 513		break;
 514	case Opt_enospc_debug:
 515		if (result.negated)
 516			btrfs_clear_opt(ctx->mount_opt, ENOSPC_DEBUG);
 517		else
 518			btrfs_set_opt(ctx->mount_opt, ENOSPC_DEBUG);
 519		break;
 520	case Opt_defrag:
 521		if (result.negated)
 522			btrfs_clear_opt(ctx->mount_opt, AUTO_DEFRAG);
 523		else
 524			btrfs_set_opt(ctx->mount_opt, AUTO_DEFRAG);
 525		break;
 526	case Opt_usebackuproot:
 527		btrfs_warn(NULL,
 528			   "'usebackuproot' is deprecated, use 'rescue=usebackuproot' instead");
 529		btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT);
 530
 531		/* If we're loading the backup roots we can't trust the space cache. */
 532		btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE);
 533		break;
 534	case Opt_skip_balance:
 535		btrfs_set_opt(ctx->mount_opt, SKIP_BALANCE);
 536		break;
 537	case Opt_fatal_errors:
 538		switch (result.uint_32) {
 539		case Opt_fatal_errors_panic:
 540			btrfs_set_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR);
 541			break;
 542		case Opt_fatal_errors_bug:
 543			btrfs_clear_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR);
 544			break;
 545		default:
 546			btrfs_err(NULL, "unrecognized fatal_errors value %s",
 547				  param->key);
 548			return -EINVAL;
 549		}
 550		break;
 551	case Opt_commit_interval:
 552		ctx->commit_interval = result.uint_32;
 553		if (ctx->commit_interval == 0)
 554			ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 555		break;
 556	case Opt_rescue:
 557		switch (result.uint_32) {
 558		case Opt_rescue_usebackuproot:
 559			btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT);
 560			break;
 561		case Opt_rescue_nologreplay:
 562			btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
 563			break;
 564		case Opt_rescue_ignorebadroots:
 565			btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
 566			break;
 567		case Opt_rescue_ignoredatacsums:
 568			btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
 569			break;
 570		case Opt_rescue_parameter_all:
 571			btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS);
 572			btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS);
 573			btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
 574			break;
 575		default:
 576			btrfs_info(NULL, "unrecognized rescue option '%s'",
 577				   param->key);
 578			return -EINVAL;
 579		}
 580		break;
 581#ifdef CONFIG_BTRFS_DEBUG
 582	case Opt_fragment:
 583		switch (result.uint_32) {
 584		case Opt_fragment_parameter_all:
 585			btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA);
 586			btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA);
 587			break;
 588		case Opt_fragment_parameter_metadata:
 589			btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA);
 590			break;
 591		case Opt_fragment_parameter_data:
 592			btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA);
 593			break;
 594		default:
 595			btrfs_info(NULL, "unrecognized fragment option '%s'",
 596				   param->key);
 597			return -EINVAL;
 598		}
 599		break;
 600#endif
 601#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 602	case Opt_ref_verify:
 603		btrfs_set_opt(ctx->mount_opt, REF_VERIFY);
 604		break;
 605#endif
 606	default:
 607		btrfs_err(NULL, "unrecognized mount option '%s'", param->key);
 608		return -EINVAL;
 609	}
 610
 611	return 0;
 612}
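/*
 * For context, a minimal userspace sketch of the new mount API flow that
 * reaches btrfs_parse_param() once per option.  The device and mount point
 * are made up for illustration; recent glibc provides wrappers for these
 * syscalls, otherwise syscall(2) can be used:
 *
 *   int fsfd = fsopen("btrfs", FSOPEN_CLOEXEC);
 *   fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb", 0);
 *   fsconfig(fsfd, FSCONFIG_SET_STRING, "compress", "zstd:3", 0);
 *   fsconfig(fsfd, FSCONFIG_SET_FLAG, "ssd", NULL, 0);
 *   fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *   int mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
 *   move_mount(mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * A legacy mount(2) call with an option string ends up in the same parser,
 * one option at a time, via the generic monolithic option splitting in the
 * VFS.
 */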
 613
 614/*
 615 * Some options only have meaning at mount time and shouldn't persist across
 616 * remounts, or be displayed. Clear these at the end of mount and remount code
 617 * paths.
 618 */
 619static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
 620{
 621	btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
 622	btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
 623	btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE);
 624}
 625
 626static bool check_ro_option(struct btrfs_fs_info *fs_info,
 627			    unsigned long mount_opt, unsigned long opt,
 628			    const char *opt_name)
 629{
 630	if (mount_opt & opt) {
 631		btrfs_err(fs_info, "%s must be used with ro mount option",
 632			  opt_name);
 633		return true;
 634	}
 635	return false;
 636}
 637
 638bool btrfs_check_options(struct btrfs_fs_info *info, unsigned long *mount_opt,
 639			 unsigned long flags)
 640{
 641	bool ret = true;
 642
 643	if (!(flags & SB_RDONLY) &&
 644	    (check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
 645	     check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
 646	     check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums")))
 647		ret = false;
 648
 649	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
 650	    !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE) &&
 651	    !btrfs_raw_test_opt(*mount_opt, CLEAR_CACHE)) {
 652		btrfs_err(info, "cannot disable free-space-tree");
 653		ret = false;
 654	}
 655	if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
 656	     !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) {
 657		btrfs_err(info, "cannot disable free-space-tree with block-group-tree feature");
 658		ret = false;
 659	}
 660
 661	if (btrfs_check_mountopts_zoned(info, mount_opt))
 662		ret = false;
 663
 664	if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {
 665		if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE))
 666			btrfs_info(info, "disk space caching is enabled");
 667		if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE))
 668			btrfs_info(info, "using free-space-tree");
 669	}
 670
 671	return ret;
 672}
 673
 674/*
 675 * This is subtle, we only call this during open_ctree().  We need to pre-load
 676 * the mount options with the on-disk settings.  Before the new mount API took
 677 * effect we would do this on mount and remount.  With the new mount API we'll
 678 * only do this on the initial mount.
 679 *
 680 * This isn't a change in behavior, because we're using the current state of the
 681 * file system to set the current mount options.  If you mounted with special
 682 * options to disable these features and then remounted we wouldn't revert the
 683 * settings, because mounting without these features cleared the on-disk
 684 * settings, so this being called on re-mount is not needed.
 685 */
 686void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
 687{
 688	if (fs_info->sectorsize < PAGE_SIZE) {
 689		btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
 690		if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
 691			btrfs_info(fs_info,
 692				   "forcing free space tree for sector size %u with page size %lu",
 693				   fs_info->sectorsize, PAGE_SIZE);
 694			btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
 695		}
 696	}
 697
 698	/*
 699	 * At this point our mount options are populated, so we only mess with
 700	 * these settings if we don't have any settings already.
 701	 */
 702	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
 703		return;
 704
 705	if (btrfs_is_zoned(fs_info) &&
 706	    btrfs_free_space_cache_v1_active(fs_info)) {
 707		btrfs_info(fs_info, "zoned: clearing existing space cache");
 708		btrfs_set_super_cache_generation(fs_info->super_copy, 0);
 709		return;
 710	}
 711
 712	if (btrfs_test_opt(fs_info, SPACE_CACHE))
 713		return;
 714
 715	if (btrfs_test_opt(fs_info, NOSPACECACHE))
 716		return;
 717
 718	/*
 719	 * At this point we don't have explicit options set by the user, set
 720	 * them ourselves based on the state of the file system.
 721	 */
 722	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
 723		btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
 724	else if (btrfs_free_space_cache_v1_active(fs_info))
 725		btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
 726}
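/*
 * Summarized, the precedence implemented above is:
 *   1) subpage setups (sectorsize < PAGE_SIZE) always get the free space tree;
 *   2) an explicit free space tree mount option wins;
 *   3) a zoned filesystem with an active v1 cache gets the cache generation
 *      zeroed so the stale cache is dropped;
 *   4) explicit space_cache / nospace_cache options are respected;
 *   5) otherwise the on-disk state (the compat_ro bit or an active v1 cache)
 *      decides.
 */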
 727
 728static void set_device_specific_options(struct btrfs_fs_info *fs_info)
 729{
 730	if (!btrfs_test_opt(fs_info, NOSSD) &&
 731	    !fs_info->fs_devices->rotating)
 732		btrfs_set_opt(fs_info->mount_opt, SSD);
 733
 734	/*
 735	 * For devices supporting discard turn on discard=async automatically,
 736	 * unless it's already set or disabled. This could be turned off by
 737	 * nodiscard for the same mount.
 738	 *
 739	 * The zoned mode piggybacks on the discard functionality for
 740	 * resetting a zone. There is no reason to delay the zone reset as it is
 741	 * fast enough. So, do not enable async discard for zoned mode.
 742	 */
 743	if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
 744	      btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
 745	      btrfs_test_opt(fs_info, NODISCARD)) &&
 746	    fs_info->fs_devices->discardable &&
 747	    !btrfs_is_zoned(fs_info))
 748		btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC);
 749}
 750
 751char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
 752					  u64 subvol_objectid)
 753{
 754	struct btrfs_root *root = fs_info->tree_root;
 755	struct btrfs_root *fs_root = NULL;
 756	struct btrfs_root_ref *root_ref;
 757	struct btrfs_inode_ref *inode_ref;
 758	struct btrfs_key key;
 759	struct btrfs_path *path = NULL;
 760	char *name = NULL, *ptr;
 761	u64 dirid;
 762	int len;
 763	int ret;
 764
 765	path = btrfs_alloc_path();
 766	if (!path) {
 767		ret = -ENOMEM;
 768		goto err;
 769	}
 770
 771	name = kmalloc(PATH_MAX, GFP_KERNEL);
 772	if (!name) {
 773		ret = -ENOMEM;
 774		goto err;
 775	}
 776	ptr = name + PATH_MAX - 1;
 777	ptr[0] = '\0';
 778
 779	/*
 780	 * Walk up the subvolume trees in the tree of tree roots by root
 781	 * backrefs until we hit the top-level subvolume.
 782	 */
 783	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
 784		key.objectid = subvol_objectid;
 785		key.type = BTRFS_ROOT_BACKREF_KEY;
 786		key.offset = (u64)-1;
 787
 788		ret = btrfs_search_backwards(root, &key, path);
 789		if (ret < 0) {
 790			goto err;
 791		} else if (ret > 0) {
 792			ret = -ENOENT;
 793			goto err;
 794		}
 795
 796		subvol_objectid = key.offset;
 797
 798		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
 799					  struct btrfs_root_ref);
 800		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
 801		ptr -= len + 1;
 802		if (ptr < name) {
 803			ret = -ENAMETOOLONG;
 804			goto err;
 805		}
 806		read_extent_buffer(path->nodes[0], ptr + 1,
 807				   (unsigned long)(root_ref + 1), len);
 808		ptr[0] = '/';
 809		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
 810		btrfs_release_path(path);
 811
 812		fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
 813		if (IS_ERR(fs_root)) {
 814			ret = PTR_ERR(fs_root);
 815			fs_root = NULL;
 816			goto err;
 817		}
 818
 819		/*
 820		 * Walk up the filesystem tree by inode refs until we hit the
 821		 * root directory.
 822		 */
 823		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
 824			key.objectid = dirid;
 825			key.type = BTRFS_INODE_REF_KEY;
 826			key.offset = (u64)-1;
 827
 828			ret = btrfs_search_backwards(fs_root, &key, path);
 829			if (ret < 0) {
 830				goto err;
 831			} else if (ret > 0) {
 832				ret = -ENOENT;
 833				goto err;
 834			}
 835
 836			dirid = key.offset;
 837
 838			inode_ref = btrfs_item_ptr(path->nodes[0],
 839						   path->slots[0],
 840						   struct btrfs_inode_ref);
 841			len = btrfs_inode_ref_name_len(path->nodes[0],
 842						       inode_ref);
 843			ptr -= len + 1;
 844			if (ptr < name) {
 845				ret = -ENAMETOOLONG;
 846				goto err;
 847			}
 848			read_extent_buffer(path->nodes[0], ptr + 1,
 849					   (unsigned long)(inode_ref + 1), len);
 850			ptr[0] = '/';
 851			btrfs_release_path(path);
 852		}
 853		btrfs_put_root(fs_root);
 854		fs_root = NULL;
 855	}
 856
 857	btrfs_free_path(path);
 858	if (ptr == name + PATH_MAX - 1) {
 859		name[0] = '/';
 860		name[1] = '\0';
 861	} else {
 862		memmove(name, ptr, name + PATH_MAX - ptr);
 863	}
 864	return name;
 865
 866err:
 867	btrfs_put_root(fs_root);
 868	btrfs_free_path(path);
 869	kfree(name);
 870	return ERR_PTR(ret);
 871}
 872
 873static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
 874{
 875	struct btrfs_root *root = fs_info->tree_root;
 876	struct btrfs_dir_item *di;
 877	struct btrfs_path *path;
 878	struct btrfs_key location;
 879	struct fscrypt_str name = FSTR_INIT("default", 7);
 880	u64 dir_id;
 881
 882	path = btrfs_alloc_path();
 883	if (!path)
 884		return -ENOMEM;
 885
 886	/*
 887	 * Find the "default" dir item which points to the root item that we
 888	 * will mount by default if we haven't been given a specific subvolume
 889	 * to mount.
 890	 */
 891	dir_id = btrfs_super_root_dir(fs_info->super_copy);
 892	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0);
 893	if (IS_ERR(di)) {
 894		btrfs_free_path(path);
 895		return PTR_ERR(di);
 896	}
 897	if (!di) {
 898		/*
 899		 * Ok the default dir item isn't there.  This is weird since
 900		 * it's always been there, but don't freak out, just try and
 901		 * mount the top-level subvolume.
 902		 */
 903		btrfs_free_path(path);
 904		*objectid = BTRFS_FS_TREE_OBJECTID;
 905		return 0;
 906	}
 907
 908	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
 909	btrfs_free_path(path);
 910	*objectid = location.objectid;
 911	return 0;
 912}
 913
 914static int btrfs_fill_super(struct super_block *sb,
 915			    struct btrfs_fs_devices *fs_devices,
 916			    void *data)
 917{
 918	struct inode *inode;
 919	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
 920	int err;
 921
 922	sb->s_maxbytes = MAX_LFS_FILESIZE;
 923	sb->s_magic = BTRFS_SUPER_MAGIC;
 924	sb->s_op = &btrfs_super_ops;
 925	sb->s_d_op = &btrfs_dentry_operations;
 926	sb->s_export_op = &btrfs_export_ops;
 927#ifdef CONFIG_FS_VERITY
 928	sb->s_vop = &btrfs_verityops;
 929#endif
 930	sb->s_xattr = btrfs_xattr_handlers;
 931	sb->s_time_gran = 1;
 932	sb->s_iflags |= SB_I_CGROUPWB;
 933
 934	err = super_setup_bdi(sb);
 935	if (err) {
 936		btrfs_err(fs_info, "super_setup_bdi failed");
 937		return err;
 938	}
 939
 940	err = open_ctree(sb, fs_devices, (char *)data);
 941	if (err) {
 942		btrfs_err(fs_info, "open_ctree failed");
 943		return err;
 944	}
 945
 946	inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
 947	if (IS_ERR(inode)) {
 948		err = PTR_ERR(inode);
 949		btrfs_handle_fs_error(fs_info, err, NULL);
 950		goto fail_close;
 951	}
 952
 953	sb->s_root = d_make_root(inode);
 954	if (!sb->s_root) {
 955		err = -ENOMEM;
 956		goto fail_close;
 957	}
 958
 959	sb->s_flags |= SB_ACTIVE;
 960	return 0;
 961
 962fail_close:
 963	close_ctree(fs_info);
 964	return err;
 965}
 966
 967int btrfs_sync_fs(struct super_block *sb, int wait)
 968{
 969	struct btrfs_trans_handle *trans;
 970	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
 971	struct btrfs_root *root = fs_info->tree_root;
 972
 973	trace_btrfs_sync_fs(fs_info, wait);
 974
 975	if (!wait) {
 976		filemap_flush(fs_info->btree_inode->i_mapping);
 977		return 0;
 978	}
 979
 980	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 981
 982	trans = btrfs_attach_transaction_barrier(root);
 983	if (IS_ERR(trans)) {
 984		/* no transaction, don't bother */
 985		if (PTR_ERR(trans) == -ENOENT) {
 986			/*
 987			 * Exit unless we have some pending changes
 988			 * that need to go through commit
 989			 */
 990			if (!test_bit(BTRFS_FS_NEED_TRANS_COMMIT,
 991				      &fs_info->flags))
 992				return 0;
 993			/*
 994			 * A non-blocking test if the fs is frozen. We must not
 995			 * start a new transaction here otherwise a deadlock
 996			 * happens. The pending operations are delayed to the
 997			 * next commit after thawing.
 998			 */
 999			if (sb_start_write_trylock(sb))
1000				sb_end_write(sb);
1001			else
1002				return 0;
1003			trans = btrfs_start_transaction(root, 0);
1004		}
1005		if (IS_ERR(trans))
1006			return PTR_ERR(trans);
1007	}
1008	return btrfs_commit_transaction(trans);
1009}
1010
1011static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed)
1012{
1013	seq_printf(seq, "%s%s", (*printed) ? ":" : ",rescue=", s);
1014	*printed = true;
1015}
1016
1017static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1018{
1019	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1020	const char *compress_type;
1021	const char *subvol_name;
1022	bool printed = false;
1023
1024	if (btrfs_test_opt(info, DEGRADED))
1025		seq_puts(seq, ",degraded");
1026	if (btrfs_test_opt(info, NODATASUM))
1027		seq_puts(seq, ",nodatasum");
1028	if (btrfs_test_opt(info, NODATACOW))
1029		seq_puts(seq, ",nodatacow");
1030	if (btrfs_test_opt(info, NOBARRIER))
1031		seq_puts(seq, ",nobarrier");
1032	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1033		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1034	if (info->thread_pool_size !=  min_t(unsigned long,
1035					     num_online_cpus() + 2, 8))
1036		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1037	if (btrfs_test_opt(info, COMPRESS)) {
1038		compress_type = btrfs_compress_type2str(info->compress_type);
1039		if (btrfs_test_opt(info, FORCE_COMPRESS))
1040			seq_printf(seq, ",compress-force=%s", compress_type);
1041		else
1042			seq_printf(seq, ",compress=%s", compress_type);
1043		if (info->compress_level)
1044			seq_printf(seq, ":%d", info->compress_level);
1045	}
1046	if (btrfs_test_opt(info, NOSSD))
1047		seq_puts(seq, ",nossd");
1048	if (btrfs_test_opt(info, SSD_SPREAD))
1049		seq_puts(seq, ",ssd_spread");
1050	else if (btrfs_test_opt(info, SSD))
1051		seq_puts(seq, ",ssd");
1052	if (btrfs_test_opt(info, NOTREELOG))
1053		seq_puts(seq, ",notreelog");
1054	if (btrfs_test_opt(info, NOLOGREPLAY))
1055		print_rescue_option(seq, "nologreplay", &printed);
1056	if (btrfs_test_opt(info, USEBACKUPROOT))
1057		print_rescue_option(seq, "usebackuproot", &printed);
1058	if (btrfs_test_opt(info, IGNOREBADROOTS))
1059		print_rescue_option(seq, "ignorebadroots", &printed);
1060	if (btrfs_test_opt(info, IGNOREDATACSUMS))
1061		print_rescue_option(seq, "ignoredatacsums", &printed);
1062	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1063		seq_puts(seq, ",flushoncommit");
1064	if (btrfs_test_opt(info, DISCARD_SYNC))
1065		seq_puts(seq, ",discard");
1066	if (btrfs_test_opt(info, DISCARD_ASYNC))
1067		seq_puts(seq, ",discard=async");
1068	if (!(info->sb->s_flags & SB_POSIXACL))
1069		seq_puts(seq, ",noacl");
1070	if (btrfs_free_space_cache_v1_active(info))
1071		seq_puts(seq, ",space_cache");
1072	else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
1073		seq_puts(seq, ",space_cache=v2");
1074	else
1075		seq_puts(seq, ",nospace_cache");
1076	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1077		seq_puts(seq, ",rescan_uuid_tree");
1078	if (btrfs_test_opt(info, CLEAR_CACHE))
1079		seq_puts(seq, ",clear_cache");
1080	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1081		seq_puts(seq, ",user_subvol_rm_allowed");
1082	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1083		seq_puts(seq, ",enospc_debug");
1084	if (btrfs_test_opt(info, AUTO_DEFRAG))
1085		seq_puts(seq, ",autodefrag");
1086	if (btrfs_test_opt(info, SKIP_BALANCE))
1087		seq_puts(seq, ",skip_balance");
1088	if (info->metadata_ratio)
1089		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1090	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1091		seq_puts(seq, ",fatal_errors=panic");
1092	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1093		seq_printf(seq, ",commit=%u", info->commit_interval);
1094#ifdef CONFIG_BTRFS_DEBUG
1095	if (btrfs_test_opt(info, FRAGMENT_DATA))
1096		seq_puts(seq, ",fragment=data");
1097	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1098		seq_puts(seq, ",fragment=metadata");
1099#endif
1100	if (btrfs_test_opt(info, REF_VERIFY))
1101		seq_puts(seq, ",ref_verify");
1102	seq_printf(seq, ",subvolid=%llu",
1103		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1104	subvol_name = btrfs_get_subvol_name_from_objectid(info,
1105			BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1106	if (!IS_ERR(subvol_name)) {
1107		seq_puts(seq, ",subvol=");
1108		seq_escape(seq, subvol_name, " \t\n\\");
1109		kfree(subvol_name);
1110	}
1111	return 0;
1112}
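/*
 * An illustrative line this produces in /proc/self/mounts (device, subvolume
 * and options of course depend on the filesystem and how it was mounted):
 *
 *   /dev/sdb /mnt btrfs rw,relatime,compress=zstd:3,discard=async,space_cache=v2,subvolid=5,subvol=/ 0 0
 *
 * The leading generic flags (rw, relatime, ...) come from the VFS; everything
 * from the first btrfs-specific option onwards is emitted by
 * btrfs_show_options().
 */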
1113
1114/*
1115 * subvolumes are identified by ino 256
1116 */
1117static inline int is_subvolume_inode(struct inode *inode)
1118{
1119	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1120		return 1;
1121	return 0;
1122}
1123
1124static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1125				   struct vfsmount *mnt)
1126{
1127	struct dentry *root;
1128	int ret;
1129
1130	if (!subvol_name) {
1131		if (!subvol_objectid) {
1132			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1133							  &subvol_objectid);
1134			if (ret) {
1135				root = ERR_PTR(ret);
1136				goto out;
1137			}
1138		}
1139		subvol_name = btrfs_get_subvol_name_from_objectid(
1140					btrfs_sb(mnt->mnt_sb), subvol_objectid);
1141		if (IS_ERR(subvol_name)) {
1142			root = ERR_CAST(subvol_name);
1143			subvol_name = NULL;
1144			goto out;
1145		}
1146
1147	}
1148
1149	root = mount_subtree(mnt, subvol_name);
1150	/* mount_subtree() drops our reference on the vfsmount. */
1151	mnt = NULL;
1152
1153	if (!IS_ERR(root)) {
1154		struct super_block *s = root->d_sb;
1155		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1156		struct inode *root_inode = d_inode(root);
1157		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1158
1159		ret = 0;
1160		if (!is_subvolume_inode(root_inode)) {
1161			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1162			       subvol_name);
1163			ret = -EINVAL;
1164		}
1165		if (subvol_objectid && root_objectid != subvol_objectid) {
1166			/*
1167			 * This will also catch a race condition where a
1168			 * subvolume which was passed by ID is renamed and
1169			 * another subvolume is renamed over the old location.
1170			 */
1171			btrfs_err(fs_info,
1172				  "subvol '%s' does not match subvolid %llu",
1173				  subvol_name, subvol_objectid);
1174			ret = -EINVAL;
1175		}
1176		if (ret) {
1177			dput(root);
1178			root = ERR_PTR(ret);
1179			deactivate_locked_super(s);
1180		}
1181	}
1182
1183out:
1184	mntput(mnt);
1185	kfree(subvol_name);
1186	return root;
1187}
1188
1189static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1190				     u32 new_pool_size, u32 old_pool_size)
1191{
1192	if (new_pool_size == old_pool_size)
1193		return;
1194
1195	fs_info->thread_pool_size = new_pool_size;
1196
1197	btrfs_info(fs_info, "resize thread pool %d -> %d",
1198	       old_pool_size, new_pool_size);
1199
1200	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1201	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1202	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1203	workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
1204	workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
1205	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1206	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1207	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1208}
1209
1210static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1211				       unsigned long old_opts, int flags)
1212{
1213	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1214	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1215	     (flags & SB_RDONLY))) {
1216		/* wait for any defraggers to finish */
1217		wait_event(fs_info->transaction_wait,
1218			   (atomic_read(&fs_info->defrag_running) == 0));
1219		if (flags & SB_RDONLY)
1220			sync_filesystem(fs_info->sb);
1221	}
1222}
1223
1224static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1225					 unsigned long old_opts)
1226{
1227	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
1228
1229	/*
1230	 * We need to clean up all defraggable inodes if autodefrag is turned
1231	 * off or the filesystem is switched to read-only.
1232	 */
1233	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1234	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1235		btrfs_cleanup_defrag_inodes(fs_info);
1236	}
1237
1238	/* If we toggled discard async */
1239	if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1240	    btrfs_test_opt(fs_info, DISCARD_ASYNC))
1241		btrfs_discard_resume(fs_info);
1242	else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1243		 !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1244		btrfs_discard_cleanup(fs_info);
1245
1246	/* If we toggled space cache */
1247	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info))
1248		btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
1249}
1250
1251static int btrfs_remount_rw(struct btrfs_fs_info *fs_info)
1252{
1253	int ret;
1254
1255	if (BTRFS_FS_ERROR(fs_info)) {
1256		btrfs_err(fs_info,
1257			  "remounting read-write after error is not allowed");
1258		return -EINVAL;
1259	}
1260
1261	if (fs_info->fs_devices->rw_devices == 0)
1262		return -EACCES;
1263
1264	if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1265		btrfs_warn(fs_info,
1266			   "too many missing devices, writable remount is not allowed");
1267		return -EACCES;
1268	}
1269
1270	if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1271		btrfs_warn(fs_info,
1272			   "mount required to replay tree-log, cannot remount read-write");
1273		return -EINVAL;
1274	}
1275
1276	/*
1277	 * NOTE: when remounting with a change that does writes, don't put it
1278	 * anywhere above this point, as we are not sure to be safe to write
1279	 * until we pass the above checks.
1280	 */
1281	ret = btrfs_start_pre_rw_mount(fs_info);
1282	if (ret)
1283		return ret;
1284
1285	btrfs_clear_sb_rdonly(fs_info->sb);
1286
1287	set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1288
1289	/*
1290	 * If we've gone from readonly -> read-write, we need to get our
1291	 * sync/async discard lists in the right state.
1292	 */
1293	btrfs_discard_resume(fs_info);
1294
1295	return 0;
1296}
1297
1298static int btrfs_remount_ro(struct btrfs_fs_info *fs_info)
1299{
1300	/*
1301	 * This also happens on 'umount -rf' or on shutdown, when the
1302	 * filesystem is busy.
1303	 */
1304	cancel_work_sync(&fs_info->async_reclaim_work);
1305	cancel_work_sync(&fs_info->async_data_reclaim_work);
1306
1307	btrfs_discard_cleanup(fs_info);
1308
1309	/* Wait for the uuid_scan task to finish */
1310	down(&fs_info->uuid_tree_rescan_sem);
1311	/* Avoid complaints from lockdep et al. */
1312	up(&fs_info->uuid_tree_rescan_sem);
1313
1314	btrfs_set_sb_rdonly(fs_info->sb);
1315
1316	/*
1317	 * Setting SB_RDONLY will put the cleaner thread to sleep at the next
1318	 * loop if it's already active.  If it's already asleep, we'll leave
1319	 * unused block groups on disk until we're mounted read-write again
1320	 * unless we clean them up here.
1321	 */
1322	btrfs_delete_unused_bgs(fs_info);
1323
1324	/*
1325	 * The cleaner task could be already running before we set the flag
1326	 * BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).  We must make
1327	 * sure that after we finish the remount, i.e. after we call
1328	 * btrfs_commit_super(), the cleaner can no longer start a transaction
1329	 * - either because it was dropping a dead root, running delayed iputs
1330	 *   or deleting an unused block group (the cleaner picked a block
1331	 *   group from the list of unused block groups before we were able to
1332	 *   in the previous call to btrfs_delete_unused_bgs()).
1333	 */
1334	wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, TASK_UNINTERRUPTIBLE);
1335
1336	/*
1337	 * We've set the superblock to RO mode, so we might have made the
1338	 * cleaner task sleep without running all pending delayed iputs. Go
1339	 * through all the delayed iputs here, so that if an unmount happens
1340	 * without remounting RW we don't end up at finishing close_ctree()
1341	 * with a non-empty list of delayed iputs.
1342	 */
1343	btrfs_run_delayed_iputs(fs_info);
1344
1345	btrfs_dev_replace_suspend_for_unmount(fs_info);
1346	btrfs_scrub_cancel(fs_info);
1347	btrfs_pause_balance(fs_info);
1348
1349	/*
1350	 * Pause the qgroup rescan worker if it is running. We don't want it to
1351	 * be still running after we are in RO mode, as after that, by the time
1352	 * we unmount, it might have left a transaction open, so we would leak
1353	 * the transaction and/or crash.
1354	 */
1355	btrfs_qgroup_wait_for_completion(fs_info, false);
1356
1357	return btrfs_commit_super(fs_info);
1358}
1359
1360static void btrfs_ctx_to_info(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx)
1361{
1362	fs_info->max_inline = ctx->max_inline;
1363	fs_info->commit_interval = ctx->commit_interval;
1364	fs_info->metadata_ratio = ctx->metadata_ratio;
1365	fs_info->thread_pool_size = ctx->thread_pool_size;
1366	fs_info->mount_opt = ctx->mount_opt;
1367	fs_info->compress_type = ctx->compress_type;
1368	fs_info->compress_level = ctx->compress_level;
1369}
1370
1371static void btrfs_info_to_ctx(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx)
1372{
1373	ctx->max_inline = fs_info->max_inline;
1374	ctx->commit_interval = fs_info->commit_interval;
1375	ctx->metadata_ratio = fs_info->metadata_ratio;
1376	ctx->thread_pool_size = fs_info->thread_pool_size;
1377	ctx->mount_opt = fs_info->mount_opt;
1378	ctx->compress_type = fs_info->compress_type;
1379	ctx->compress_level = fs_info->compress_level;
1380}
1381
1382#define btrfs_info_if_set(fs_info, old_ctx, opt, fmt, args...)			\
1383do {										\
1384	if ((!old_ctx || !btrfs_raw_test_opt(old_ctx->mount_opt, opt)) &&	\
1385	    btrfs_raw_test_opt(fs_info->mount_opt, opt))			\
1386		btrfs_info(fs_info, fmt, ##args);				\
1387} while (0)
1388
1389#define btrfs_info_if_unset(fs_info, old_ctx, opt, fmt, args...)	\
1390do {									\
1391	if ((old_ctx && btrfs_raw_test_opt(old_ctx->mount_opt, opt)) &&	\
1392	    !btrfs_raw_test_opt(fs_info->mount_opt, opt))		\
1393		btrfs_info(fs_info, fmt, ##args);			\
1394} while (0)
1395
1396static void btrfs_emit_options(struct btrfs_fs_info *info,
1397			       struct btrfs_fs_context *old)
1398{
1399	btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
1400	btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
1401	btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
1402	btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
1403	btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
1404	btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
1405	btrfs_info_if_set(info, old, NOTREELOG, "disabling tree log");
1406	btrfs_info_if_set(info, old, NOLOGREPLAY, "disabling log replay at mount time");
1407	btrfs_info_if_set(info, old, FLUSHONCOMMIT, "turning on flush-on-commit");
1408	btrfs_info_if_set(info, old, DISCARD_SYNC, "turning on sync discard");
1409	btrfs_info_if_set(info, old, DISCARD_ASYNC, "turning on async discard");
1410	btrfs_info_if_set(info, old, FREE_SPACE_TREE, "enabling free space tree");
1411	btrfs_info_if_set(info, old, SPACE_CACHE, "enabling disk space caching");
1412	btrfs_info_if_set(info, old, CLEAR_CACHE, "force clearing of disk cache");
1413	btrfs_info_if_set(info, old, AUTO_DEFRAG, "enabling auto defrag");
1414	btrfs_info_if_set(info, old, FRAGMENT_DATA, "fragmenting data");
1415	btrfs_info_if_set(info, old, FRAGMENT_METADATA, "fragmenting metadata");
1416	btrfs_info_if_set(info, old, REF_VERIFY, "doing ref verification");
1417	btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time");
1418	btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots");
1419	btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums");
1420
1421	btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
1422	btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
1423	btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
1424	btrfs_info_if_unset(info, old, NOBARRIER, "turning off barriers");
1425	btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
1426	btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
1427	btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
1428	btrfs_info_if_unset(info, old, AUTO_DEFRAG, "disabling auto defrag");
1429	btrfs_info_if_unset(info, old, COMPRESS, "use no compression");
1430
1431	/* Did the compression settings change? */
1432	if (btrfs_test_opt(info, COMPRESS) &&
1433	    (!old ||
1434	     old->compress_type != info->compress_type ||
1435	     old->compress_level != info->compress_level ||
1436	     (!btrfs_raw_test_opt(old->mount_opt, FORCE_COMPRESS) &&
1437	      btrfs_raw_test_opt(info->mount_opt, FORCE_COMPRESS)))) {
1438		const char *compress_type = btrfs_compress_type2str(info->compress_type);
1439
1440		btrfs_info(info, "%s %s compression, level %d",
1441			   btrfs_test_opt(info, FORCE_COMPRESS) ? "force" : "use",
1442			   compress_type, info->compress_level);
1443	}
1444
1445	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1446		btrfs_info(info, "max_inline set to %llu", info->max_inline);
1447}
1448
1449static int btrfs_reconfigure(struct fs_context *fc)
1450{
1451	struct super_block *sb = fc->root->d_sb;
1452	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1453	struct btrfs_fs_context *ctx = fc->fs_private;
1454	struct btrfs_fs_context old_ctx;
1455	int ret = 0;
1456	bool mount_reconfigure = (fc->s_fs_info != NULL);
1457
1458	btrfs_info_to_ctx(fs_info, &old_ctx);
1459
1460	/*
1461	 * This is our "bind mount" trick, we don't want to allow the user to do
1462	 * anything other than mount a different ro/rw and a different subvol,
1463	 * all of the mount options should be maintained.
1464	 */
1465	if (mount_reconfigure)
1466		ctx->mount_opt = old_ctx.mount_opt;
1467
1468	sync_filesystem(sb);
1469	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1470
1471	if (!mount_reconfigure &&
1472	    !btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags))
1473		return -EINVAL;
1474
1475	ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY));
1476	if (ret < 0)
1477		return ret;
1478
1479	btrfs_ctx_to_info(fs_info, ctx);
1480	btrfs_remount_begin(fs_info, old_ctx.mount_opt, fc->sb_flags);
1481	btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size,
1482				 old_ctx.thread_pool_size);
1483
1484	if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
1485	    (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
1486	    (!sb_rdonly(sb) || (fc->sb_flags & SB_RDONLY))) {
1487		btrfs_warn(fs_info,
1488		"remount supports changing free space tree only from RO to RW");
1489		/* Make sure free space cache options match the state on disk. */
1490		if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
1491			btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
1492			btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
1493		}
1494		if (btrfs_free_space_cache_v1_active(fs_info)) {
1495			btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
1496			btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
1497		}
1498	}
1499
1500	ret = 0;
1501	if (!sb_rdonly(sb) && (fc->sb_flags & SB_RDONLY))
1502		ret = btrfs_remount_ro(fs_info);
1503	else if (sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY))
1504		ret = btrfs_remount_rw(fs_info);
1505	if (ret)
1506		goto restore;
1507
1508	/*
1509	 * If we set the mask during the parameter parsing VFS would reject the
1510	 * remount.  Here we can set the mask and the value will be updated
1511	 * appropriately.
1512	 */
1513	if ((fc->sb_flags & SB_POSIXACL) != (sb->s_flags & SB_POSIXACL))
1514		fc->sb_flags_mask |= SB_POSIXACL;
1515
1516	btrfs_emit_options(fs_info, &old_ctx);
1517	wake_up_process(fs_info->transaction_kthread);
1518	btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
1519	btrfs_clear_oneshot_options(fs_info);
1520	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1521
1522	return 0;
1523restore:
1524	btrfs_ctx_to_info(fs_info, &old_ctx);
1525	btrfs_remount_cleanup(fs_info, old_ctx.mount_opt);
1526	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1527	return ret;
1528}
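/*
 * Typical ways this reconfigure path is reached (illustrative):
 *
 *   mount -o remount,ro /mnt             read-only transition -> btrfs_remount_ro()
 *   mount -o remount,rw /mnt             read-write transition -> btrfs_remount_rw()
 *   mount -o remount,compress=lzo /mnt   option change on a mounted filesystem
 *
 * Mounting a different subvolume of an already mounted filesystem also lands
 * here with fc->s_fs_info set (the mount_reconfigure case above), in which
 * case everything except the ro/rw state is carried over from the existing
 * mount.
 */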
1529
1530/* Used to sort the devices by max_avail(descending sort) */
1531static int btrfs_cmp_device_free_bytes(const void *a, const void *b)
1532{
1533	const struct btrfs_device_info *dev_info1 = a;
1534	const struct btrfs_device_info *dev_info2 = b;
1535
1536	if (dev_info1->max_avail > dev_info2->max_avail)
1537		return -1;
1538	else if (dev_info1->max_avail < dev_info2->max_avail)
1539		return 1;
1540	return 0;
1541}
1542
1543/*
1544 * Sort the devices by max_avail, in which the max free extent size of each
1545 * device is stored (descending sort).
1546 */
1547static inline void btrfs_descending_sort_devices(
1548					struct btrfs_device_info *devices,
1549					size_t nr_devices)
1550{
1551	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
1552	     btrfs_cmp_device_free_bytes, NULL);
1553}
1554
1555/*
1556 * The helper to calc the free space on the devices that can be used to store
1557 * file data.
1558 */
1559static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1560					      u64 *free_bytes)
1561{
1562	struct btrfs_device_info *devices_info;
1563	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1564	struct btrfs_device *device;
1565	u64 type;
1566	u64 avail_space;
1567	u64 min_stripe_size;
1568	int num_stripes = 1;
1569	int i = 0, nr_devices;
1570	const struct btrfs_raid_attr *rattr;
1571
1572	/*
1573	 * We aren't under the device list lock, so this is racy-ish, but good
1574	 * enough for our purposes.
1575	 */
1576	nr_devices = fs_info->fs_devices->open_devices;
1577	if (!nr_devices) {
1578		smp_mb();
1579		nr_devices = fs_info->fs_devices->open_devices;
1580		ASSERT(nr_devices);
1581		if (!nr_devices) {
1582			*free_bytes = 0;
1583			return 0;
1584		}
1585	}
1586
1587	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1588			       GFP_KERNEL);
1589	if (!devices_info)
1590		return -ENOMEM;
1591
1592	/* calc min stripe number for data space allocation */
1593	type = btrfs_data_alloc_profile(fs_info);
1594	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
1595
1596	if (type & BTRFS_BLOCK_GROUP_RAID0)
1597		num_stripes = nr_devices;
1598	else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK)
1599		num_stripes = rattr->ncopies;
1600	else if (type & BTRFS_BLOCK_GROUP_RAID10)
1601		num_stripes = 4;
1602
1603	/* Adjust for more than 1 stripe per device */
1604	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
1605
1606	rcu_read_lock();
1607	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1608		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1609						&device->dev_state) ||
1610		    !device->bdev ||
1611		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
1612			continue;
1613
1614		if (i >= nr_devices)
1615			break;
1616
1617		avail_space = device->total_bytes - device->bytes_used;
1618
1619		/* align with stripe_len */
1620		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
1621
1622		/*
1623		 * Ensure we have at least min_stripe_size on top of the
1624		 * reserved space on the device.
1625		 */
1626		if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size)
1627			continue;
1628
1629		avail_space -= BTRFS_DEVICE_RANGE_RESERVED;
1630
1631		devices_info[i].dev = device;
1632		devices_info[i].max_avail = avail_space;
1633
1634		i++;
1635	}
1636	rcu_read_unlock();
1637
1638	nr_devices = i;
1639
1640	btrfs_descending_sort_devices(devices_info, nr_devices);
1641
1642	i = nr_devices - 1;
1643	avail_space = 0;
1644	while (nr_devices >= rattr->devs_min) {
1645		num_stripes = min(num_stripes, nr_devices);
1646
1647		if (devices_info[i].max_avail >= min_stripe_size) {
1648			int j;
1649			u64 alloc_size;
1650
1651			avail_space += devices_info[i].max_avail * num_stripes;
1652			alloc_size = devices_info[i].max_avail;
1653			for (j = i + 1 - num_stripes; j <= i; j++)
1654				devices_info[j].max_avail -= alloc_size;
1655		}
1656		i--;
1657		nr_devices--;
1658	}
1659
1660	kfree(devices_info);
1661	*free_bytes = avail_space;
1662	return 0;
1663}
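/*
 * Illustrative sketch (not part of the kernel source): the same greedy
 * estimate that btrfs_calc_avail_data_space() performs, reduced to plain
 * numbers in userspace.  The per-device free sizes and the RAID1-style
 * parameters below are made-up example values, not data read from a real
 * filesystem.
 */
#include <stdio.h>
#include <stdlib.h>

static int cmp_desc(const void *a, const void *b)
{
	unsigned long long x = *(const unsigned long long *)a;
	unsigned long long y = *(const unsigned long long *)b;

	return (x < y) - (x > y);	/* descending order */
}

int main(void)
{
	/* Free bytes per device, e.g. three devices of different sizes. */
	unsigned long long avail[] = { 80ULL << 30, 50ULL << 30, 20ULL << 30 };
	unsigned long long min_stripe = 64ULL << 10;	/* stand-in for min_stripe_size */
	unsigned long long total = 0;
	int nr = 3, num_stripes = 2, devs_min = 2, i;

	qsort(avail, nr, sizeof(avail[0]), cmp_desc);

	/* Walk from the smallest device, virtually allocating mirrored stripes. */
	for (i = nr - 1; nr >= devs_min; i--, nr--) {
		if (avail[i] >= min_stripe) {
			unsigned long long alloc = avail[i];
			int j;

			total += alloc * num_stripes;
			for (j = i + 1 - num_stripes; j <= i; j++)
				avail[j] -= alloc;
		}
	}
	/* "total" is raw space; a RAID1 profile stores num_stripes copies. */
	printf("estimated raw allocatable bytes: %llu\n", total);
	return 0;
}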
1664
1665/*
1666 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
1667 *
1668 * If there's a redundant raid level at DATA block groups, use the respective
1669 * multiplier to scale the sizes.
1670 *
1671 * Unused device space usage is based on simulating the chunk allocator
1672 * algorithm that respects the device sizes and order of allocations.  This is
1673 * a close approximation of the actual use but there are other factors that may
1674 * change the result (like a new metadata chunk).
1675 *
1676 * If metadata is exhausted, f_bavail will be 0.
1677 */
1678static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1679{
1680	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
1681	struct btrfs_super_block *disk_super = fs_info->super_copy;
1682	struct btrfs_space_info *found;
1683	u64 total_used = 0;
1684	u64 total_free_data = 0;
1685	u64 total_free_meta = 0;
1686	u32 bits = fs_info->sectorsize_bits;
1687	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
1688	unsigned factor = 1;
1689	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
1690	int ret;
1691	u64 thresh = 0;
1692	int mixed = 0;
1693
1694	list_for_each_entry(found, &fs_info->space_info, list) {
1695		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
1696			int i;
1697
1698			total_free_data += found->disk_total - found->disk_used;
1699			total_free_data -=
1700				btrfs_account_ro_block_groups_free_space(found);
1701
1702			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1703				if (!list_empty(&found->block_groups[i]))
1704					factor = btrfs_bg_type_to_factor(
1705						btrfs_raid_array[i].bg_flag);
1706			}
1707		}
1708
1709		/*
1710		 * Metadata in mixed block group profiles is accounted as data.
1711		 */
1712		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
1713			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
1714				mixed = 1;
1715			else
1716				total_free_meta += found->disk_total -
1717					found->disk_used;
1718		}
1719
1720		total_used += found->disk_used;
1721	}
1722
1723	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
1724	buf->f_blocks >>= bits;
1725	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
1726
1727	/* Account global block reserve as used, it's in logical size already */
1728	spin_lock(&block_rsv->lock);
1729	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
1730	if (buf->f_bfree >= block_rsv->size >> bits)
1731		buf->f_bfree -= block_rsv->size >> bits;
1732	else
1733		buf->f_bfree = 0;
1734	spin_unlock(&block_rsv->lock);
1735
1736	buf->f_bavail = div_u64(total_free_data, factor);
1737	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
1738	if (ret)
1739		return ret;
1740	buf->f_bavail += div_u64(total_free_data, factor);
1741	buf->f_bavail = buf->f_bavail >> bits;
1742
1743	/*
1744	 * We calculate the remaining metadata space minus the global reserve. If
1745	 * this is (supposedly) smaller than zero, there's no space. But this
1746	 * does not hold in practice: the exhausted state can happen while there
1747	 * is still some positive delta. So we apply some guesswork and compare
1748	 * the delta to a 4M threshold.  (The practically observed delta was ~2M.)
1749	 *
1750	 * We probably cannot calculate the exact threshold value because this
1751	 * depends on the internal reservations requested by various
1752	 * operations, so some operations that consume a small amount of metadata
1753	 * will succeed even if the reported Avail is zero. But this is better
1754	 * than the other way around.
1755	 */
1756	thresh = SZ_4M;
1757
1758	/*
1759	 * We only want to claim there's no available space if we can no longer
1760	 * allocate chunks for our metadata profile and our global reserve will
1761	 * not fit in the free metadata space.  If we aren't ->full then we
1762	 * still can allocate chunks and thus are fine using the currently
1763	 * calculated f_bavail.
1764	 */
1765	if (!mixed && block_rsv->space_info->full &&
1766	    (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
1767		buf->f_bavail = 0;
1768
1769	buf->f_type = BTRFS_SUPER_MAGIC;
1770	buf->f_bsize = dentry->d_sb->s_blocksize;
1771	buf->f_namelen = BTRFS_NAME_LEN;
1772
1773	/* We treat it as constant endianness (it doesn't matter _which_)
1774	   because we want the fsid to come out the same whether mounted
1775	   on a big-endian or little-endian host */
1776	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
1777	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
1778	/* Mask in the root object ID too, to disambiguate subvols */
1779	buf->f_fsid.val[0] ^=
1780		BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
1781	buf->f_fsid.val[1] ^=
1782		BTRFS_I(d_inode(dentry))->root->root_key.objectid;
1783
1784	return 0;
1785}
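/*
 * Illustrative sketch (not part of the kernel source): how the f_fsid value
 * reported by btrfs_statfs() above is folded from the 16-byte filesystem UUID
 * and the subvolume's root objectid.  The UUID bytes and the objectid below
 * are arbitrary example values.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl(), the userspace be32_to_cpu() */

int main(void)
{
	const uint8_t fsid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x0f, 0xed, 0xcb, 0xa9, 0x87, 0x65, 0x43, 0x21,
	};
	uint64_t root_objectid = 256;	/* e.g. the first created subvolume */
	uint32_t w[4], val0, val1;
	int i;

	for (i = 0; i < 4; i++) {
		memcpy(&w[i], fsid + 4 * i, sizeof(w[i]));
		w[i] = ntohl(w[i]);
	}
	val0 = (w[0] ^ w[2]) ^ (uint32_t)(root_objectid >> 32);
	val1 = (w[1] ^ w[3]) ^ (uint32_t)root_objectid;

	printf("f_fsid = { 0x%08x, 0x%08x }\n", (unsigned)val0, (unsigned)val1);
	return 0;
}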
1786
1787static int btrfs_fc_test_super(struct super_block *sb, struct fs_context *fc)
1788{
1789	struct btrfs_fs_info *p = fc->s_fs_info;
1790	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1791
1792	return fs_info->fs_devices == p->fs_devices;
1793}
1794
1795static int btrfs_get_tree_super(struct fs_context *fc)
1796{
1797	struct btrfs_fs_info *fs_info = fc->s_fs_info;
1798	struct btrfs_fs_context *ctx = fc->fs_private;
1799	struct btrfs_fs_devices *fs_devices = NULL;
1800	struct block_device *bdev;
1801	struct btrfs_device *device;
1802	struct super_block *sb;
1803	blk_mode_t mode = btrfs_open_mode(fc);
1804	int ret;
1805
1806	btrfs_ctx_to_info(fs_info, ctx);
1807	mutex_lock(&uuid_mutex);
1808
1809	/*
1810	 * With 'true' passed to btrfs_scan_one_device() (mount time) we expect
1811	 * either a valid device or an error.
1812	 */
1813	device = btrfs_scan_one_device(fc->source, mode, true);
1814	ASSERT(device != NULL);
1815	if (IS_ERR(device)) {
1816		mutex_unlock(&uuid_mutex);
1817		return PTR_ERR(device);
1818	}
1819
1820	fs_devices = device->fs_devices;
1821	fs_info->fs_devices = fs_devices;
1822
1823	ret = btrfs_open_devices(fs_devices, mode, &btrfs_fs_type);
1824	mutex_unlock(&uuid_mutex);
1825	if (ret)
1826		return ret;
1827
1828	if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1829		ret = -EACCES;
1830		goto error;
1831	}
1832
1833	bdev = fs_devices->latest_dev->bdev;
1834
1835	/*
1836	 * From now on the error handling is not straightforward.
1837	 *
1838	 * If successful, this will transfer the fs_info into the super block,
1839	 * and fc->s_fs_info will be NULL.  However if there's an existing
1840	 * super, we'll still have fc->s_fs_info populated.  If we error
1841	 * completely out it'll be cleaned up when we drop the fs_context,
1842	 * otherwise it's tied to the lifetime of the super_block.
1843	 */
1844	sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc);
1845	if (IS_ERR(sb)) {
1846		ret = PTR_ERR(sb);
1847		goto error;
1848	}
1849
1850	set_device_specific_options(fs_info);
1851
1852	if (sb->s_root) {
1853		btrfs_close_devices(fs_devices);
1854		if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY)
1855			ret = -EBUSY;
1856	} else {
1857		snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1858		shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
1859		btrfs_sb(sb)->bdev_holder = &btrfs_fs_type;
1860		ret = btrfs_fill_super(sb, fs_devices, NULL);
1861	}
1862
1863	if (ret) {
1864		deactivate_locked_super(sb);
1865		return ret;
1866	}
1867
1868	btrfs_clear_oneshot_options(fs_info);
1869
1870	fc->root = dget(sb->s_root);
1871	return 0;
1872
1873error:
1874	btrfs_close_devices(fs_devices);
1875	return ret;
1876}
1877
1878/*
1879 * Ever since commit 0723a0473fb4 ("btrfs: allow mounting btrfs subvolumes
1880 * with different ro/rw options") the following works:
1881 *
1882 *        (i) mount /dev/sda3 -o subvol=foo,ro /mnt/foo
1883 *       (ii) mount /dev/sda3 -o subvol=bar,rw /mnt/bar
1884 *
1885 * which looks nice and innocent but is actually pretty intricate and deserves
1886 * a long comment.
1887 *
1888 * On another filesystem a subvolume mount is close to something like:
1889 *
1890 *	(iii) # create rw superblock + initial mount
1891 *	      mount -t xfs /dev/sdb /opt/
1892 *
1893 *	      # create ro bind mount
1894 *	      mount --bind -o ro /opt/foo /mnt/foo
1895 *
1896 *	      # unmount initial mount
1897 *	      umount /opt
1898 *
1899 * Of course, there's some special subvolume sauce and there's the fact that the
1900 * sb->s_root dentry is really swapped after mount_subtree(). But conceptually
1901 * it's very close and will help us understand the issue.
1902 *
1903 * The old mount API didn't cleanly distinguish between a mount being made ro
1904 * and a superblock being made ro.  The only way to change the ro state of
1905 * either object was by passing MS_RDONLY. If a new mount was created via
1906 * mount(2) such as:
1907 *
1908 *      mount("/dev/sdb", "/mnt", "xfs", MS_RDONLY, NULL);
1909 *
1910 * the MS_RDONLY flag being specified had two effects:
1911 *
1912 * (1) MNT_READONLY was raised -> the resulting mount got
1913 *     @mnt->mnt_flags |= MNT_READONLY raised.
1914 *
1915 * (2) MS_RDONLY was passed to the filesystem's mount method and the filesystem
1916 *     made the superblock ro. Note how SB_RDONLY has the same value as
1917 *     MS_RDONLY and is raised whenever MS_RDONLY is passed through mount(2).
1918 *
1919 * Creating a subtree mount via (iii) ends up leaving a rw superblock with a
1920 * subtree mounted ro.
1921 *
1922 * But consider the effect of the old mount API on btrfs subvolume mounting,
1923 * which combines the distinct steps in (iii) into a single step.
1924 *
1925 * By issuing (i) both the mount and the superblock are turned ro. Now when (ii)
1926 * is issued the superblock is ro and thus even if the mount created for (ii) is
1927 * rw it wouldn't help. Hence, btrfs needed to transition the superblock from ro
1928 * to rw for (ii) which it did using an internal remount call.
1929 *
1930 * IOW, subvolume mounting was inherently complicated due to the ambiguity of
1931 * MS_RDONLY in mount(2). On top of this ambiguity, mount(8) always translates
1932 * "ro" to MS_RDONLY. IOW, in both (i) and (ii) "ro" becomes MS_RDONLY when
1933 * passed by mount(8) to mount(2).
1934 *
1935 * Enter the new mount API. The new mount API disambiguates making a mount ro
1936 * and making a superblock ro.
1937 *
1938 * (3) To turn a mount ro the MOUNT_ATTR_RDONLY flag can be used with either
1939 *     fsmount() or mount_setattr(); this is a pure VFS level change for a
1940 *     specific mount or mount tree that is never seen by the filesystem itself.
1941 *
1942 * (4) To turn a superblock ro the "ro" flag must be used with
1943 *     fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem
1944 *     in fc->sb_flags.
1945 *
1946 * This disambiguation has rather positive consequences.  Mounting a subvolume
1947 * ro will not also turn the superblock ro. Only the mount for the subvolume
1948 * will become ro.
1949 *
1950 * So, if the superblock creation request comes from the new mount API the
1951 * caller must have explicitly done:
1952 *
1953 *      fsconfig(FSCONFIG_SET_FLAG, "ro")
1954 *      fsmount/mount_setattr(MOUNT_ATTR_RDONLY)
1955 *
1956 * IOW, at some point the caller must have explicitly turned the whole
1957 * superblock ro and we shouldn't just undo it like we did for the old mount
1958 * API. In any case, it lets us avoid the hack in the new mount API.
1959 *
1960 * Consequently, the remounting hack must only be used for requests originating
1961 * from the old mount API and should be marked for full deprecation so it can be
1962 * turned off in a couple of years.
1963 *
1964 * The new mount API has no reason to support this hack.
1965 */
1966static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc)
1967{
1968	struct vfsmount *mnt;
1969	int ret;
1970	const bool ro2rw = !(fc->sb_flags & SB_RDONLY);
1971
1972	/*
1973	 * We got an EBUSY because our SB_RDONLY flag didn't match the existing
1974	 * super block, so invert our setting here and retry the mount so we
1975	 * can get our vfsmount.
1976	 */
1977	if (ro2rw)
1978		fc->sb_flags |= SB_RDONLY;
1979	else
1980		fc->sb_flags &= ~SB_RDONLY;
1981
1982	mnt = fc_mount(fc);
1983	if (IS_ERR(mnt))
1984		return mnt;
1985
1986	if (!fc->oldapi || !ro2rw)
1987		return mnt;
1988
1989	/* We need to convert to rw, call reconfigure. */
1990	fc->sb_flags &= ~SB_RDONLY;
1991	down_write(&mnt->mnt_sb->s_umount);
1992	ret = btrfs_reconfigure(fc);
1993	up_write(&mnt->mnt_sb->s_umount);
1994	if (ret) {
1995		mntput(mnt);
1996		return ERR_PTR(ret);
1997	}
1998	return mnt;
1999}
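/*
 * Illustrative sketch (not part of the kernel source): the new mount API
 * sequence described in the comment above, creating a read-only *mount* of a
 * subvolume while leaving the btrfs superblock read-write.  The device path,
 * subvolume name and target directory are made-up examples, and the
 * fsopen()/fsconfig()/fsmount()/move_mount() wrappers are assumed to come
 * from a recent glibc (2.36+) or equivalent syscall(2) wrappers.
 */
#include <sys/mount.h>
#include <fcntl.h>
#include <unistd.h>

int mount_subvol_ro_example(void)
{
	int fsfd, mntfd;

	fsfd = fsopen("btrfs", FSOPEN_CLOEXEC);
	if (fsfd < 0)
		return -1;

	/* Configure the superblock; note that no "ro" flag is set here. */
	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdX", 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "subvol", "snapshots/daily", 0);
	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	/* Only the mount itself becomes read-only, case (3) above. */
	mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_RDONLY);
	if (mntfd < 0) {
		close(fsfd);
		return -1;
	}

	move_mount(mntfd, "", AT_FDCWD, "/mnt/daily", MOVE_MOUNT_F_EMPTY_PATH);
	close(mntfd);
	close(fsfd);
	return 0;
}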
2000
2001static int btrfs_get_tree_subvol(struct fs_context *fc)
2002{
2003	struct btrfs_fs_info *fs_info = NULL;
2004	struct btrfs_fs_context *ctx = fc->fs_private;
2005	struct fs_context *dup_fc;
2006	struct dentry *dentry;
2007	struct vfsmount *mnt;
2008
2009	/*
2010	 * Set up a dummy root and fs_info for test/set super.  We don't actually
2011	 * fill this stuff out until open_ctree, but we need it for the super block
2012	 * test/set callbacks; open_ctree will properly initialize the file system
2013	 * specific settings later.  btrfs_init_fs_info initializes the static
2014	 * elements of the fs_info (locks and such) to make cleanup easier if we
2015	 * find a superblock with our given fs_devices later on at sget() time.
2016	 */
2017	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
2018	if (!fs_info)
2019		return -ENOMEM;
2020
2021	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
2022	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
2023	if (!fs_info->super_copy || !fs_info->super_for_commit) {
2024		btrfs_free_fs_info(fs_info);
2025		return -ENOMEM;
2026	}
2027	btrfs_init_fs_info(fs_info);
2028
2029	dup_fc = vfs_dup_fs_context(fc);
2030	if (IS_ERR(dup_fc)) {
2031		btrfs_free_fs_info(fs_info);
2032		return PTR_ERR(dup_fc);
2033	}
2034
2035	/*
2036	 * When we do the sget_fc this gets transferred to the sb, so we only
2037	 * need to set it on the dup_fc as this is what creates the super block.
2038	 */
2039	dup_fc->s_fs_info = fs_info;
2040
2041	/*
2042	 * We'll do the security settings in our btrfs_get_tree_super() mount
2043	 * loop, they were duplicated into dup_fc, we can drop the originals
2044	 * here.
2045	 */
2046	security_free_mnt_opts(&fc->security);
2047	fc->security = NULL;
2048
2049	mnt = fc_mount(dup_fc);
2050	if (PTR_ERR_OR_ZERO(mnt) == -EBUSY)
2051		mnt = btrfs_reconfigure_for_mount(dup_fc);
2052	put_fs_context(dup_fc);
2053	if (IS_ERR(mnt))
2054		return PTR_ERR(mnt);
2055
2056	/*
2057	 * This frees ->subvol_name, because if it isn't set we have to
2058	 * allocate a buffer to hold the subvol_name, so we just drop our
2059	 * reference to it here.
2060	 */
2061	dentry = mount_subvol(ctx->subvol_name, ctx->subvol_objectid, mnt);
2062	ctx->subvol_name = NULL;
2063	if (IS_ERR(dentry))
2064		return PTR_ERR(dentry);
2065
2066	fc->root = dentry;
2067	return 0;
2068}
2069
2070static int btrfs_get_tree(struct fs_context *fc)
2071{
2072	/*
2073	 * Since we use mount_subtree to mount the default/specified subvol, we
2074	 * have to do mounts in two steps.
2075	 *
2076	 * On the first pass through we call btrfs_get_tree_subvol(), which is just
2077	 * a wrapper around fc_mount() to call back into here again, and this time
2078	 * we'll call btrfs_get_tree_super().  This will do the open_ctree() and
2079	 * everything needed to open the devices and file system.  Then we return
2080	 * with a fully constructed vfsmount in btrfs_get_tree_subvol(), and from
2081	 * there we can do our mount_subvol() call, which will look up whichever
2082	 * subvol we're mounting and set up this fc with the appropriate dentry
2083	 * for the subvol.
2084	 */
2085	if (fc->s_fs_info)
2086		return btrfs_get_tree_super(fc);
2087	return btrfs_get_tree_subvol(fc);
2088}
2089
2090static void btrfs_kill_super(struct super_block *sb)
2091{
2092	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2093	kill_anon_super(sb);
2094	btrfs_free_fs_info(fs_info);
2095}
2096
2097static void btrfs_free_fs_context(struct fs_context *fc)
2098{
2099	struct btrfs_fs_context *ctx = fc->fs_private;
2100	struct btrfs_fs_info *fs_info = fc->s_fs_info;
2101
2102	if (fs_info)
2103		btrfs_free_fs_info(fs_info);
2104
2105	if (ctx && refcount_dec_and_test(&ctx->refs)) {
2106		kfree(ctx->subvol_name);
2107		kfree(ctx);
2108	}
2109}
2110
2111static int btrfs_dup_fs_context(struct fs_context *fc, struct fs_context *src_fc)
2112{
2113	struct btrfs_fs_context *ctx = src_fc->fs_private;
2114
2115	/*
2116	 * Give a ref to our ctx to this dup, as we want to keep it around for
2117	 * our original fc so we can have the subvolume name or objectid.
2118	 *
2119	 * We unset ->source in the original fc because the dup needs it for
2120	 * mounting, and then once we free the dup it'll free ->source, so we
2121	 * need to make sure we're only pointing to it in one fc.
2122	 */
2123	refcount_inc(&ctx->refs);
2124	fc->fs_private = ctx;
2125	fc->source = src_fc->source;
2126	src_fc->source = NULL;
2127	return 0;
2128}
2129
2130static const struct fs_context_operations btrfs_fs_context_ops = {
2131	.parse_param	= btrfs_parse_param,
2132	.reconfigure	= btrfs_reconfigure,
2133	.get_tree	= btrfs_get_tree,
2134	.dup		= btrfs_dup_fs_context,
2135	.free		= btrfs_free_fs_context,
2136};
2137
2138static int btrfs_init_fs_context(struct fs_context *fc)
2139{
2140	struct btrfs_fs_context *ctx;
2141
2142	ctx = kzalloc(sizeof(struct btrfs_fs_context), GFP_KERNEL);
2143	if (!ctx)
2144		return -ENOMEM;
2145
2146	refcount_set(&ctx->refs, 1);
2147	fc->fs_private = ctx;
2148	fc->ops = &btrfs_fs_context_ops;
2149
2150	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
2151		btrfs_info_to_ctx(btrfs_sb(fc->root->d_sb), ctx);
2152	} else {
2153		ctx->thread_pool_size =
2154			min_t(unsigned long, num_online_cpus() + 2, 8);
2155		ctx->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2156		ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2157	}
2158
2159#ifdef CONFIG_BTRFS_FS_POSIX_ACL
2160	fc->sb_flags |= SB_POSIXACL;
2161#endif
2162	fc->sb_flags |= SB_I_VERSION;
2163
2164	return 0;
2165}
2166
2167static struct file_system_type btrfs_fs_type = {
2168	.owner			= THIS_MODULE,
2169	.name			= "btrfs",
2170	.init_fs_context	= btrfs_init_fs_context,
2171	.parameters		= btrfs_fs_parameters,
2172	.kill_sb		= btrfs_kill_super,
2173	.fs_flags		= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
2174 };
2175
2176MODULE_ALIAS_FS("btrfs");
2177
2178static int btrfs_control_open(struct inode *inode, struct file *file)
2179{
2180	/*
2181	 * The control file's private_data is used to hold the
2182	 * transaction when it is started and is used to keep
2183	 * track of whether a transaction is already in progress.
2184	 */
2185	file->private_data = NULL;
2186	return 0;
2187}
2188
2189/*
2190 * Used by /dev/btrfs-control for devices ioctls.
2191 */
2192static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2193				unsigned long arg)
2194{
2195	struct btrfs_ioctl_vol_args *vol;
2196	struct btrfs_device *device = NULL;
2197	dev_t devt = 0;
2198	int ret = -ENOTTY;
2199
2200	if (!capable(CAP_SYS_ADMIN))
2201		return -EPERM;
2202
2203	vol = memdup_user((void __user *)arg, sizeof(*vol));
2204	if (IS_ERR(vol))
2205		return PTR_ERR(vol);
2206	vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2207
2208	switch (cmd) {
2209	case BTRFS_IOC_SCAN_DEV:
2210		mutex_lock(&uuid_mutex);
2211		/*
2212		 * Scanning outside of mount can return NULL which would turn
2213		 * into 0 error code.
2214		 */
2215		device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
2216		ret = PTR_ERR_OR_ZERO(device);
2217		mutex_unlock(&uuid_mutex);
2218		break;
2219	case BTRFS_IOC_FORGET_DEV:
2220		if (vol->name[0] != 0) {
2221			ret = lookup_bdev(vol->name, &devt);
2222			if (ret)
2223				break;
2224		}
2225		ret = btrfs_forget_devices(devt);
2226		break;
2227	case BTRFS_IOC_DEVICES_READY:
2228		mutex_lock(&uuid_mutex);
2229		/*
2230		 * Scanning outside of mount can return NULL which would turn
2231		 * into 0 error code.
2232		 */
2233		device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
2234		if (IS_ERR_OR_NULL(device)) {
2235			mutex_unlock(&uuid_mutex);
2236			ret = PTR_ERR(device);
2237			break;
2238		}
2239		ret = !(device->fs_devices->num_devices ==
2240			device->fs_devices->total_devices);
2241		mutex_unlock(&uuid_mutex);
2242		break;
2243	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2244		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2245		break;
2246	}
2247
2248	kfree(vol);
2249	return ret;
2250}
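/*
 * Illustrative sketch (not part of the kernel source): how a userspace tool
 * (a stripped-down "btrfs device scan") would reach the ioctl handler above
 * through /dev/btrfs-control.  The device path argument is an arbitrary
 * example and the caller needs CAP_SYS_ADMIN, as enforced above.
 */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int scan_one_device_example(const char *path)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	fd = open("/dev/btrfs-control", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, path, BTRFS_PATH_NAME_MAX);

	/* Registers the device with the module: BTRFS_IOC_SCAN_DEV above. */
	ret = ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);
	close(fd);
	return ret;
}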
2251
2252static int btrfs_freeze(struct super_block *sb)
2253{
2254	struct btrfs_trans_handle *trans;
2255	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2256	struct btrfs_root *root = fs_info->tree_root;
2257
2258	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2259	/*
2260	 * We don't need a barrier here, we'll wait for any transaction that
2261	 * could be in progress on other threads (and do delayed iputs that
2262	 * we want to avoid on a frozen filesystem), or do the commit
2263	 * ourselves.
2264	 */
2265	trans = btrfs_attach_transaction_barrier(root);
2266	if (IS_ERR(trans)) {
2267		/* no transaction, don't bother */
2268		if (PTR_ERR(trans) == -ENOENT)
2269			return 0;
2270		return PTR_ERR(trans);
2271	}
2272	return btrfs_commit_transaction(trans);
2273}
2274
2275static int check_dev_super(struct btrfs_device *dev)
2276{
2277	struct btrfs_fs_info *fs_info = dev->fs_info;
2278	struct btrfs_super_block *sb;
2279	u64 last_trans;
2280	u16 csum_type;
2281	int ret = 0;
2282
2283	/* This should be called with fs still frozen. */
2284	ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags));
2285
2286	/* Missing dev, no need to check. */
2287	if (!dev->bdev)
2288		return 0;
2289
2290	/* Only need to check the primary super block. */
2291	sb = btrfs_read_dev_one_super(dev->bdev, 0, true);
2292	if (IS_ERR(sb))
2293		return PTR_ERR(sb);
2294
2295	/* Verify the checksum. */
2296	csum_type = btrfs_super_csum_type(sb);
2297	if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) {
2298		btrfs_err(fs_info, "csum type changed, has %u expect %u",
2299			  csum_type, btrfs_super_csum_type(fs_info->super_copy));
2300		ret = -EUCLEAN;
2301		goto out;
2302	}
2303
2304	if (btrfs_check_super_csum(fs_info, sb)) {
2305		btrfs_err(fs_info, "csum for on-disk super block no longer matches");
2306		ret = -EUCLEAN;
2307		goto out;
2308	}
2309
2310	/* btrfs_validate_super() includes an fsid check against super->fsid. */
2311	ret = btrfs_validate_super(fs_info, sb, 0);
2312	if (ret < 0)
2313		goto out;
2314
2315	last_trans = btrfs_get_last_trans_committed(fs_info);
2316	if (btrfs_super_generation(sb) != last_trans) {
2317		btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
2318			  btrfs_super_generation(sb), last_trans);
2319		ret = -EUCLEAN;
2320		goto out;
2321	}
2322out:
2323	btrfs_release_disk_super(sb);
2324	return ret;
2325}
2326
2327static int btrfs_unfreeze(struct super_block *sb)
2328{
2329	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2330	struct btrfs_device *device;
2331	int ret = 0;
2332
2333	/*
2334	 * Make sure the fs is not changed by accident (e.g. hibernated and then
2335	 * modified by another OS).
2336	 * If we find anything wrong, we mark the fs as errored immediately.
2337	 *
2338	 * And since the fs is frozen, no one can modify it yet, thus we don't
2339	 * need to hold device_list_mutex.
2340	 */
2341	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
2342		ret = check_dev_super(device);
2343		if (ret < 0) {
2344			btrfs_handle_fs_error(fs_info, ret,
2345				"super block on devid %llu got modified unexpectedly",
2346				device->devid);
2347			break;
2348		}
2349	}
2350	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2351
2352	/*
2353	 * We still return 0, to allow the VFS layer to unfreeze the fs even if
2354	 * the above checks failed. Since the fs is either fine or read-only, we're
2355	 * safe to continue, without causing further damage.
2356	 */
2357	return 0;
2358}
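/*
 * Illustrative sketch (not part of the kernel source): freezing and thawing
 * a btrfs filesystem from userspace (what fsfreeze(8) does), which ends up in
 * btrfs_freeze() and btrfs_unfreeze() above.  The mount point is an arbitrary
 * example and the caller needs appropriate privileges.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FIFREEZE, FITHAW */

int freeze_thaw_example(void)
{
	int fd = open("/mnt/btrfs", O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FIFREEZE, 0) == 0)	/* -> btrfs_freeze() */
		ioctl(fd, FITHAW, 0);		/* -> btrfs_unfreeze() */
	close(fd);
	return 0;
}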
2359
2360static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2361{
2362	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2363
2364	/*
2365	 * There should always be a valid pointer in latest_dev; it may be stale
2366	 * for a short moment in case it's being deleted, but it is still valid
2367	 * until the end of the RCU grace period.
2368	 */
2369	rcu_read_lock();
2370	seq_escape(m, btrfs_dev_name(fs_info->fs_devices->latest_dev), " \t\n\\");
2371	rcu_read_unlock();
2372
2373	return 0;
2374}
2375
2376static const struct super_operations btrfs_super_ops = {
2377	.drop_inode	= btrfs_drop_inode,
2378	.evict_inode	= btrfs_evict_inode,
2379	.put_super	= btrfs_put_super,
2380	.sync_fs	= btrfs_sync_fs,
2381	.show_options	= btrfs_show_options,
2382	.show_devname	= btrfs_show_devname,
2383	.alloc_inode	= btrfs_alloc_inode,
2384	.destroy_inode	= btrfs_destroy_inode,
2385	.free_inode	= btrfs_free_inode,
2386	.statfs		= btrfs_statfs,
2387	.freeze_fs	= btrfs_freeze,
2388	.unfreeze_fs	= btrfs_unfreeze,
2389};
2390
2391static const struct file_operations btrfs_ctl_fops = {
2392	.open = btrfs_control_open,
2393	.unlocked_ioctl	 = btrfs_control_ioctl,
2394	.compat_ioctl = compat_ptr_ioctl,
2395	.owner	 = THIS_MODULE,
2396	.llseek = noop_llseek,
2397};
2398
2399static struct miscdevice btrfs_misc = {
2400	.minor		= BTRFS_MINOR,
2401	.name		= "btrfs-control",
2402	.fops		= &btrfs_ctl_fops
2403};
2404
2405MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2406MODULE_ALIAS("devname:btrfs-control");
2407
2408static int __init btrfs_interface_init(void)
2409{
2410	return misc_register(&btrfs_misc);
2411}
2412
2413static __cold void btrfs_interface_exit(void)
2414{
2415	misc_deregister(&btrfs_misc);
2416}
2417
2418static int __init btrfs_print_mod_info(void)
2419{
2420	static const char options[] = ""
2421#ifdef CONFIG_BTRFS_DEBUG
2422			", debug=on"
2423#endif
2424#ifdef CONFIG_BTRFS_ASSERT
2425			", assert=on"
2426#endif
2427#ifdef CONFIG_BTRFS_FS_REF_VERIFY
2428			", ref-verify=on"
2429#endif
2430#ifdef CONFIG_BLK_DEV_ZONED
2431			", zoned=yes"
2432#else
2433			", zoned=no"
2434#endif
2435#ifdef CONFIG_FS_VERITY
2436			", fsverity=yes"
2437#else
2438			", fsverity=no"
2439#endif
2440			;
2441	pr_info("Btrfs loaded%s\n", options);
2442	return 0;
2443}
2444
2445static int register_btrfs(void)
2446{
2447	return register_filesystem(&btrfs_fs_type);
2448}
2449
2450static void unregister_btrfs(void)
2451{
2452	unregister_filesystem(&btrfs_fs_type);
2453}
2454
2455/* Helper structure for long init/exit functions. */
2456struct init_sequence {
2457	int (*init_func)(void);
2458	/* Can be NULL if the init_func doesn't need cleanup. */
2459	void (*exit_func)(void);
2460};
2461
2462static const struct init_sequence mod_init_seq[] = {
2463	{
2464		.init_func = btrfs_props_init,
2465		.exit_func = NULL,
2466	}, {
2467		.init_func = btrfs_init_sysfs,
2468		.exit_func = btrfs_exit_sysfs,
2469	}, {
2470		.init_func = btrfs_init_compress,
2471		.exit_func = btrfs_exit_compress,
2472	}, {
2473		.init_func = btrfs_init_cachep,
2474		.exit_func = btrfs_destroy_cachep,
2475	}, {
2476		.init_func = btrfs_transaction_init,
2477		.exit_func = btrfs_transaction_exit,
2478	}, {
2479		.init_func = btrfs_ctree_init,
2480		.exit_func = btrfs_ctree_exit,
2481	}, {
2482		.init_func = btrfs_free_space_init,
2483		.exit_func = btrfs_free_space_exit,
2484	}, {
2485		.init_func = extent_state_init_cachep,
2486		.exit_func = extent_state_free_cachep,
2487	}, {
2488		.init_func = extent_buffer_init_cachep,
2489		.exit_func = extent_buffer_free_cachep,
2490	}, {
2491		.init_func = btrfs_bioset_init,
2492		.exit_func = btrfs_bioset_exit,
2493	}, {
2494		.init_func = extent_map_init,
2495		.exit_func = extent_map_exit,
2496	}, {
2497		.init_func = ordered_data_init,
2498		.exit_func = ordered_data_exit,
2499	}, {
2500		.init_func = btrfs_delayed_inode_init,
2501		.exit_func = btrfs_delayed_inode_exit,
2502	}, {
2503		.init_func = btrfs_auto_defrag_init,
2504		.exit_func = btrfs_auto_defrag_exit,
2505	}, {
2506		.init_func = btrfs_delayed_ref_init,
2507		.exit_func = btrfs_delayed_ref_exit,
2508	}, {
2509		.init_func = btrfs_prelim_ref_init,
2510		.exit_func = btrfs_prelim_ref_exit,
2511	}, {
2512		.init_func = btrfs_interface_init,
2513		.exit_func = btrfs_interface_exit,
2514	}, {
2515		.init_func = btrfs_print_mod_info,
2516		.exit_func = NULL,
2517	}, {
2518		.init_func = btrfs_run_sanity_tests,
2519		.exit_func = NULL,
2520	}, {
2521		.init_func = register_btrfs,
2522		.exit_func = unregister_btrfs,
2523	}
2524};
2525
2526static bool mod_init_result[ARRAY_SIZE(mod_init_seq)];
2527
2528static __always_inline void btrfs_exit_btrfs_fs(void)
2529{
2530	int i;
2531
2532	for (i = ARRAY_SIZE(mod_init_seq) - 1; i >= 0; i--) {
2533		if (!mod_init_result[i])
2534			continue;
2535		if (mod_init_seq[i].exit_func)
2536			mod_init_seq[i].exit_func();
2537		mod_init_result[i] = false;
2538	}
2539}
2540
2541static void __exit exit_btrfs_fs(void)
2542{
2543	btrfs_exit_btrfs_fs();
2544	btrfs_cleanup_fs_uuids();
2545}
2546
2547static int __init init_btrfs_fs(void)
2548{
2549	int ret;
2550	int i;
2551
2552	for (i = 0; i < ARRAY_SIZE(mod_init_seq); i++) {
2553		ASSERT(!mod_init_result[i]);
2554		ret = mod_init_seq[i].init_func();
2555		if (ret < 0) {
2556			btrfs_exit_btrfs_fs();
2557			return ret;
2558		}
2559		mod_init_result[i] = true;
2560	}
2561	return 0;
2562}
2563
2564late_initcall(init_btrfs_fs);
2565module_exit(exit_btrfs_fs)
2566
2567MODULE_LICENSE("GPL");
2568MODULE_SOFTDEP("pre: crc32c");
2569MODULE_SOFTDEP("pre: xxhash64");
2570MODULE_SOFTDEP("pre: sha256");
2571MODULE_SOFTDEP("pre: blake2b-256");
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/module.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/highmem.h>
  11#include <linux/time.h>
  12#include <linux/init.h>
  13#include <linux/seq_file.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/mount.h>
  17#include <linux/writeback.h>
  18#include <linux/statfs.h>
  19#include <linux/compat.h>
  20#include <linux/parser.h>
  21#include <linux/ctype.h>
  22#include <linux/namei.h>
  23#include <linux/miscdevice.h>
  24#include <linux/magic.h>
  25#include <linux/slab.h>
  26#include <linux/cleancache.h>
  27#include <linux/ratelimit.h>
  28#include <linux/crc32c.h>
  29#include <linux/btrfs.h>
 
 
 
  30#include "delayed-inode.h"
  31#include "ctree.h"
  32#include "disk-io.h"
  33#include "transaction.h"
  34#include "btrfs_inode.h"
  35#include "print-tree.h"
  36#include "props.h"
  37#include "xattr.h"
  38#include "volumes.h"
  39#include "export.h"
  40#include "compression.h"
  41#include "rcu-string.h"
  42#include "dev-replace.h"
  43#include "free-space-cache.h"
  44#include "backref.h"
  45#include "space-info.h"
  46#include "sysfs.h"
 
  47#include "tests/btrfs-tests.h"
  48#include "block-group.h"
  49
  50#include "qgroup.h"
 
 
 
 
 
 
 
 
 
 
  51#define CREATE_TRACE_POINTS
  52#include <trace/events/btrfs.h>
  53
  54static const struct super_operations btrfs_super_ops;
  55
  56/*
  57 * Types for mounting the default subvolume and a subvolume explicitly
  58 * requested by subvol=/path. That way the callchain is straightforward and we
  59 * don't have to play tricks with the mount options and recursive calls to
  60 * btrfs_mount.
  61 *
  62 * The new btrfs_root_fs_type also servers as a tag for the bdev_holder.
  63 */
  64static struct file_system_type btrfs_fs_type;
  65static struct file_system_type btrfs_root_fs_type;
  66
  67static int btrfs_remount(struct super_block *sb, int *flags, char *data);
  68
  69const char *btrfs_decode_error(int errno)
  70{
  71	char *errstr = "unknown";
  72
  73	switch (errno) {
  74	case -EIO:
  75		errstr = "IO failure";
  76		break;
  77	case -ENOMEM:
  78		errstr = "Out of memory";
  79		break;
  80	case -EROFS:
  81		errstr = "Readonly filesystem";
  82		break;
  83	case -EEXIST:
  84		errstr = "Object already exists";
  85		break;
  86	case -ENOSPC:
  87		errstr = "No space left";
  88		break;
  89	case -ENOENT:
  90		errstr = "No such entry";
  91		break;
  92	}
  93
  94	return errstr;
  95}
  96
  97/*
  98 * __btrfs_handle_fs_error decodes expected errors from the caller and
  99 * invokes the appropriate error response.
 100 */
 101__cold
 102void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
 103		       unsigned int line, int errno, const char *fmt, ...)
 104{
 105	struct super_block *sb = fs_info->sb;
 106#ifdef CONFIG_PRINTK
 107	const char *errstr;
 108#endif
 109
 110	/*
 111	 * Special case: if the error is EROFS, and we're already
 112	 * under SB_RDONLY, then it is safe here.
 113	 */
 114	if (errno == -EROFS && sb_rdonly(sb))
 115  		return;
 116
 117#ifdef CONFIG_PRINTK
 118	errstr = btrfs_decode_error(errno);
 119	if (fmt) {
 120		struct va_format vaf;
 121		va_list args;
 122
 123		va_start(args, fmt);
 124		vaf.fmt = fmt;
 125		vaf.va = &args;
 126
 127		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 128			sb->s_id, function, line, errno, errstr, &vaf);
 129		va_end(args);
 130	} else {
 131		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 132			sb->s_id, function, line, errno, errstr);
 133	}
 134#endif
 135
 136	/*
 137	 * Today we only save the error info to memory.  Long term we'll
 138	 * also send it down to the disk
 139	 */
 140	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 141
 142	/* Don't go through full error handling during mount */
 143	if (!(sb->s_flags & SB_BORN))
 144		return;
 145
 146	if (sb_rdonly(sb))
 147		return;
 148
 149	/* btrfs handle error by forcing the filesystem readonly */
 150	sb->s_flags |= SB_RDONLY;
 151	btrfs_info(fs_info, "forced readonly");
 152	/*
 153	 * Note that a running device replace operation is not canceled here
 154	 * although there is no way to update the progress. It would add the
 155	 * risk of a deadlock, therefore the canceling is omitted. The only
 156	 * penalty is that some I/O remains active until the procedure
 157	 * completes. The next time when the filesystem is mounted writable
 158	 * again, the device replace operation continues.
 159	 */
 160}
 161
 162#ifdef CONFIG_PRINTK
 163static const char * const logtypes[] = {
 164	"emergency",
 165	"alert",
 166	"critical",
 167	"error",
 168	"warning",
 169	"notice",
 170	"info",
 171	"debug",
 172};
 173
 174
 175/*
 176 * Use one ratelimit state per log level so that a flood of less important
 177 * messages doesn't cause more important ones to be dropped.
 178 */
 179static struct ratelimit_state printk_limits[] = {
 180	RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
 181	RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
 182	RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
 183	RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
 184	RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
 185	RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
 186	RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
 187	RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
 188};
 189
 190void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 191{
 192	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
 193	struct va_format vaf;
 194	va_list args;
 195	int kern_level;
 196	const char *type = logtypes[4];
 197	struct ratelimit_state *ratelimit = &printk_limits[4];
 198
 199	va_start(args, fmt);
 200
 201	while ((kern_level = printk_get_level(fmt)) != 0) {
 202		size_t size = printk_skip_level(fmt) - fmt;
 203
 204		if (kern_level >= '0' && kern_level <= '7') {
 205			memcpy(lvl, fmt,  size);
 206			lvl[size] = '\0';
 207			type = logtypes[kern_level - '0'];
 208			ratelimit = &printk_limits[kern_level - '0'];
 209		}
 210		fmt += size;
 211	}
 212
 213	vaf.fmt = fmt;
 214	vaf.va = &args;
 215
 216	if (__ratelimit(ratelimit))
 217		printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
 218			fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
 219
 220	va_end(args);
 221}
 222#endif
 223
 224/*
 225 * We only mark the transaction aborted and then set the file system read-only.
 226 * This will prevent new transactions from starting or trying to join this
 227 * one.
 228 *
 229 * This means that error recovery at the call site is limited to freeing
 230 * any local memory allocations and passing the error code up without
 231 * further cleanup. The transaction should complete as it normally would
 232 * in the call path but will return -EIO.
 233 *
 234 * We'll complete the cleanup in btrfs_end_transaction and
 235 * btrfs_commit_transaction.
 236 */
 237__cold
 238void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 239			       const char *function,
 240			       unsigned int line, int errno)
 241{
 242	struct btrfs_fs_info *fs_info = trans->fs_info;
 243
 244	trans->aborted = errno;
 245	/* Nothing used. The other threads that have joined this
 246	 * transaction may be able to continue. */
 247	if (!trans->dirty && list_empty(&trans->new_bgs)) {
 248		const char *errstr;
 249
 250		errstr = btrfs_decode_error(errno);
 251		btrfs_warn(fs_info,
 252		           "%s:%d: Aborting unused transaction(%s).",
 253		           function, line, errstr);
 254		return;
 255	}
 256	WRITE_ONCE(trans->transaction->aborted, errno);
 257	/* Wake up anybody who may be waiting on this transaction */
 258	wake_up(&fs_info->transaction_wait);
 259	wake_up(&fs_info->transaction_blocked_wait);
 260	__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
 261}
 262/*
 263 * __btrfs_panic decodes unexpected, fatal errors from the caller,
 264 * issues an alert, and either panics or BUGs, depending on mount options.
 265 */
 266__cold
 267void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 268		   unsigned int line, int errno, const char *fmt, ...)
 269{
 270	char *s_id = "<unknown>";
 271	const char *errstr;
 272	struct va_format vaf = { .fmt = fmt };
 273	va_list args;
 274
 275	if (fs_info)
 276		s_id = fs_info->sb->s_id;
 277
 278	va_start(args, fmt);
 279	vaf.va = &args;
 280
 281	errstr = btrfs_decode_error(errno);
 282	if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
 283		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
 284			s_id, function, line, &vaf, errno, errstr);
 285
 286	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
 287		   function, line, &vaf, errno, errstr);
 288	va_end(args);
 289	/* Caller calls BUG() */
 290}
 291
 292static void btrfs_put_super(struct super_block *sb)
 293{
 294	close_ctree(btrfs_sb(sb));
 295}
 296
 297enum {
 298	Opt_acl, Opt_noacl,
 299	Opt_clear_cache,
 300	Opt_commit_interval,
 301	Opt_compress,
 302	Opt_compress_force,
 303	Opt_compress_force_type,
 304	Opt_compress_type,
 305	Opt_degraded,
 306	Opt_device,
 307	Opt_fatal_errors,
 308	Opt_flushoncommit, Opt_noflushoncommit,
 309	Opt_inode_cache, Opt_noinode_cache,
 310	Opt_max_inline,
 311	Opt_barrier, Opt_nobarrier,
 312	Opt_datacow, Opt_nodatacow,
 313	Opt_datasum, Opt_nodatasum,
 314	Opt_defrag, Opt_nodefrag,
 315	Opt_discard, Opt_nodiscard,
 316	Opt_nologreplay,
 317	Opt_norecovery,
 318	Opt_ratio,
 319	Opt_rescan_uuid_tree,
 320	Opt_skip_balance,
 321	Opt_space_cache, Opt_no_space_cache,
 322	Opt_space_cache_version,
 323	Opt_ssd, Opt_nossd,
 324	Opt_ssd_spread, Opt_nossd_spread,
 325	Opt_subvol,
 326	Opt_subvol_empty,
 327	Opt_subvolid,
 328	Opt_thread_pool,
 329	Opt_treelog, Opt_notreelog,
 330	Opt_usebackuproot,
 331	Opt_user_subvol_rm_allowed,
 332
 333	/* Deprecated options */
 334	Opt_alloc_start,
 335	Opt_recovery,
 336	Opt_subvolrootid,
 
 
 
 337
 338	/* Debugging options */
 339	Opt_check_integrity,
 340	Opt_check_integrity_including_extent_data,
 341	Opt_check_integrity_print_mask,
 342	Opt_enospc_debug, Opt_noenospc_debug,
 343#ifdef CONFIG_BTRFS_DEBUG
 344	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
 345#endif
 346#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 347	Opt_ref_verify,
 348#endif
 349	Opt_err,
 350};
 351
 352static const match_table_t tokens = {
 353	{Opt_acl, "acl"},
 354	{Opt_noacl, "noacl"},
 355	{Opt_clear_cache, "clear_cache"},
 356	{Opt_commit_interval, "commit=%u"},
 357	{Opt_compress, "compress"},
 358	{Opt_compress_type, "compress=%s"},
 359	{Opt_compress_force, "compress-force"},
 360	{Opt_compress_force_type, "compress-force=%s"},
 361	{Opt_degraded, "degraded"},
 362	{Opt_device, "device=%s"},
 363	{Opt_fatal_errors, "fatal_errors=%s"},
 364	{Opt_flushoncommit, "flushoncommit"},
 365	{Opt_noflushoncommit, "noflushoncommit"},
 366	{Opt_inode_cache, "inode_cache"},
 367	{Opt_noinode_cache, "noinode_cache"},
 368	{Opt_max_inline, "max_inline=%s"},
 369	{Opt_barrier, "barrier"},
 370	{Opt_nobarrier, "nobarrier"},
 371	{Opt_datacow, "datacow"},
 372	{Opt_nodatacow, "nodatacow"},
 373	{Opt_datasum, "datasum"},
 374	{Opt_nodatasum, "nodatasum"},
 375	{Opt_defrag, "autodefrag"},
 376	{Opt_nodefrag, "noautodefrag"},
 377	{Opt_discard, "discard"},
 378	{Opt_nodiscard, "nodiscard"},
 379	{Opt_nologreplay, "nologreplay"},
 380	{Opt_norecovery, "norecovery"},
 381	{Opt_ratio, "metadata_ratio=%u"},
 382	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 383	{Opt_skip_balance, "skip_balance"},
 384	{Opt_space_cache, "space_cache"},
 385	{Opt_no_space_cache, "nospace_cache"},
 386	{Opt_space_cache_version, "space_cache=%s"},
 387	{Opt_ssd, "ssd"},
 388	{Opt_nossd, "nossd"},
 389	{Opt_ssd_spread, "ssd_spread"},
 390	{Opt_nossd_spread, "nossd_spread"},
 391	{Opt_subvol, "subvol=%s"},
 392	{Opt_subvol_empty, "subvol="},
 393	{Opt_subvolid, "subvolid=%s"},
 394	{Opt_thread_pool, "thread_pool=%u"},
 395	{Opt_treelog, "treelog"},
 396	{Opt_notreelog, "notreelog"},
 397	{Opt_usebackuproot, "usebackuproot"},
 398	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 399
 400	/* Deprecated options */
 401	{Opt_alloc_start, "alloc_start=%s"},
 402	{Opt_recovery, "recovery"},
 403	{Opt_subvolrootid, "subvolrootid=%d"},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 404
 405	/* Debugging options */
 406	{Opt_check_integrity, "check_int"},
 407	{Opt_check_integrity_including_extent_data, "check_int_data"},
 408	{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
 409	{Opt_enospc_debug, "enospc_debug"},
 410	{Opt_noenospc_debug, "noenospc_debug"},
 411#ifdef CONFIG_BTRFS_DEBUG
 412	{Opt_fragment_data, "fragment=data"},
 413	{Opt_fragment_metadata, "fragment=metadata"},
 414	{Opt_fragment_all, "fragment=all"},
 415#endif
 416#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 417	{Opt_ref_verify, "ref_verify"},
 418#endif
 419	{Opt_err, NULL},
 420};
 421
 422/*
 423 * Regular mount options parser.  Everything that is needed only when
 424 * reading in a new superblock is parsed here.
 425 * XXX JDM: This needs to be cleaned up for remount.
 426 */
 427int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 428			unsigned long new_flags)
 429{
 430	substring_t args[MAX_OPT_ARGS];
 431	char *p, *num;
 432	u64 cache_gen;
 433	int intarg;
 434	int ret = 0;
 435	char *compress_type;
 436	bool compress_force = false;
 437	enum btrfs_compression_type saved_compress_type;
 438	bool saved_compress_force;
 439	int no_compress = 0;
 440
 441	cache_gen = btrfs_super_cache_generation(info->super_copy);
 442	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
 443		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
 444	else if (cache_gen)
 445		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 446
 447	/*
 448	 * Even the options are empty, we still need to do extra check
 449	 * against new flags
 450	 */
 451	if (!options)
 452		goto check;
 453
 454	while ((p = strsep(&options, ",")) != NULL) {
 455		int token;
 456		if (!*p)
 457			continue;
 458
 459		token = match_token(p, tokens, args);
 460		switch (token) {
 461		case Opt_degraded:
 462			btrfs_info(info, "allowing degraded mounts");
 463			btrfs_set_opt(info->mount_opt, DEGRADED);
 464			break;
 465		case Opt_subvol:
 466		case Opt_subvol_empty:
 467		case Opt_subvolid:
 468		case Opt_subvolrootid:
 469		case Opt_device:
 470			/*
 471			 * These are parsed by btrfs_parse_subvol_options or
 472			 * btrfs_parse_device_options and can be ignored here.
 473			 */
 474			break;
 475		case Opt_nodatasum:
 476			btrfs_set_and_info(info, NODATASUM,
 477					   "setting nodatasum");
 478			break;
 479		case Opt_datasum:
 480			if (btrfs_test_opt(info, NODATASUM)) {
 481				if (btrfs_test_opt(info, NODATACOW))
 482					btrfs_info(info,
 483						   "setting datasum, datacow enabled");
 484				else
 485					btrfs_info(info, "setting datasum");
 486			}
 487			btrfs_clear_opt(info->mount_opt, NODATACOW);
 488			btrfs_clear_opt(info->mount_opt, NODATASUM);
 489			break;
 490		case Opt_nodatacow:
 491			if (!btrfs_test_opt(info, NODATACOW)) {
 492				if (!btrfs_test_opt(info, COMPRESS) ||
 493				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
 494					btrfs_info(info,
 495						   "setting nodatacow, compression disabled");
 496				} else {
 497					btrfs_info(info, "setting nodatacow");
 498				}
 499			}
 500			btrfs_clear_opt(info->mount_opt, COMPRESS);
 501			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 502			btrfs_set_opt(info->mount_opt, NODATACOW);
 503			btrfs_set_opt(info->mount_opt, NODATASUM);
 504			break;
 505		case Opt_datacow:
 506			btrfs_clear_and_info(info, NODATACOW,
 507					     "setting datacow");
 508			break;
 509		case Opt_compress_force:
 510		case Opt_compress_force_type:
 511			compress_force = true;
 512			/* Fallthrough */
 513		case Opt_compress:
 514		case Opt_compress_type:
 515			saved_compress_type = btrfs_test_opt(info,
 516							     COMPRESS) ?
 517				info->compress_type : BTRFS_COMPRESS_NONE;
 518			saved_compress_force =
 519				btrfs_test_opt(info, FORCE_COMPRESS);
 520			if (token == Opt_compress ||
 521			    token == Opt_compress_force ||
 522			    strncmp(args[0].from, "zlib", 4) == 0) {
 523				compress_type = "zlib";
 524
 525				info->compress_type = BTRFS_COMPRESS_ZLIB;
 526				info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
 527				/*
 528				 * args[0] contains uninitialized data since
 529				 * for these tokens we don't expect any
 530				 * parameter.
 531				 */
 532				if (token != Opt_compress &&
 533				    token != Opt_compress_force)
 534					info->compress_level =
 535					  btrfs_compress_str2level(
 536							BTRFS_COMPRESS_ZLIB,
 537							args[0].from + 4);
 538				btrfs_set_opt(info->mount_opt, COMPRESS);
 539				btrfs_clear_opt(info->mount_opt, NODATACOW);
 540				btrfs_clear_opt(info->mount_opt, NODATASUM);
 541				no_compress = 0;
 542			} else if (strncmp(args[0].from, "lzo", 3) == 0) {
 543				compress_type = "lzo";
 544				info->compress_type = BTRFS_COMPRESS_LZO;
 545				btrfs_set_opt(info->mount_opt, COMPRESS);
 546				btrfs_clear_opt(info->mount_opt, NODATACOW);
 547				btrfs_clear_opt(info->mount_opt, NODATASUM);
 548				btrfs_set_fs_incompat(info, COMPRESS_LZO);
 549				no_compress = 0;
 550			} else if (strncmp(args[0].from, "zstd", 4) == 0) {
 551				compress_type = "zstd";
 552				info->compress_type = BTRFS_COMPRESS_ZSTD;
 553				info->compress_level =
 554					btrfs_compress_str2level(
 555							 BTRFS_COMPRESS_ZSTD,
 556							 args[0].from + 4);
 557				btrfs_set_opt(info->mount_opt, COMPRESS);
 558				btrfs_clear_opt(info->mount_opt, NODATACOW);
 559				btrfs_clear_opt(info->mount_opt, NODATASUM);
 560				btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
 561				no_compress = 0;
 562			} else if (strncmp(args[0].from, "no", 2) == 0) {
 563				compress_type = "no";
 564				btrfs_clear_opt(info->mount_opt, COMPRESS);
 565				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 566				compress_force = false;
 567				no_compress++;
 568			} else {
 569				ret = -EINVAL;
 570				goto out;
 571			}
 572
 573			if (compress_force) {
 574				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
 575			} else {
 576				/*
 577				 * If we remount from compress-force=xxx to
 578				 * compress=xxx, we need clear FORCE_COMPRESS
 579				 * flag, otherwise, there is no way for users
 580				 * to disable forcible compression separately.
 581				 */
 582				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 583			}
 584			if ((btrfs_test_opt(info, COMPRESS) &&
 585			     (info->compress_type != saved_compress_type ||
 586			      compress_force != saved_compress_force)) ||
 587			    (!btrfs_test_opt(info, COMPRESS) &&
 588			     no_compress == 1)) {
 589				btrfs_info(info, "%s %s compression, level %d",
 590					   (compress_force) ? "force" : "use",
 591					   compress_type, info->compress_level);
 592			}
 593			compress_force = false;
 594			break;
 595		case Opt_ssd:
 596			btrfs_set_and_info(info, SSD,
 597					   "enabling ssd optimizations");
 598			btrfs_clear_opt(info->mount_opt, NOSSD);
 599			break;
 600		case Opt_ssd_spread:
 601			btrfs_set_and_info(info, SSD,
 602					   "enabling ssd optimizations");
 603			btrfs_set_and_info(info, SSD_SPREAD,
 604					   "using spread ssd allocation scheme");
 605			btrfs_clear_opt(info->mount_opt, NOSSD);
 606			break;
 607		case Opt_nossd:
 608			btrfs_set_opt(info->mount_opt, NOSSD);
 609			btrfs_clear_and_info(info, SSD,
 610					     "not using ssd optimizations");
 611			/* Fallthrough */
 612		case Opt_nossd_spread:
 613			btrfs_clear_and_info(info, SSD_SPREAD,
 614					     "not using spread ssd allocation scheme");
 615			break;
 616		case Opt_barrier:
 617			btrfs_clear_and_info(info, NOBARRIER,
 618					     "turning on barriers");
 619			break;
 620		case Opt_nobarrier:
 621			btrfs_set_and_info(info, NOBARRIER,
 622					   "turning off barriers");
 623			break;
 624		case Opt_thread_pool:
 625			ret = match_int(&args[0], &intarg);
 626			if (ret) {
 627				goto out;
 628			} else if (intarg == 0) {
 629				ret = -EINVAL;
 630				goto out;
 631			}
 632			info->thread_pool_size = intarg;
 633			break;
 634		case Opt_max_inline:
 635			num = match_strdup(&args[0]);
 636			if (num) {
 637				info->max_inline = memparse(num, NULL);
 638				kfree(num);
 639
 640				if (info->max_inline) {
 641					info->max_inline = min_t(u64,
 642						info->max_inline,
 643						info->sectorsize);
 644				}
 645				btrfs_info(info, "max_inline at %llu",
 646					   info->max_inline);
 647			} else {
 648				ret = -ENOMEM;
 649				goto out;
 650			}
 651			break;
 652		case Opt_alloc_start:
 653			btrfs_info(info,
 654				"option alloc_start is obsolete, ignored");
 655			break;
 656		case Opt_acl:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 657#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 658			info->sb->s_flags |= SB_POSIXACL;
 659			break;
 660#else
 661			btrfs_err(info, "support for ACL not compiled in!");
 662			ret = -EINVAL;
 663			goto out;
 664#endif
 665		case Opt_noacl:
 666			info->sb->s_flags &= ~SB_POSIXACL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 667			break;
 668		case Opt_notreelog:
 669			btrfs_set_and_info(info, NOTREELOG,
 670					   "disabling tree log");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 671			break;
 672		case Opt_treelog:
 673			btrfs_clear_and_info(info, NOTREELOG,
 674					     "enabling tree log");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 675			break;
 676		case Opt_norecovery:
 677		case Opt_nologreplay:
 678			btrfs_set_and_info(info, NOLOGREPLAY,
 679					   "disabling log replay at mount time");
 680			break;
 681		case Opt_flushoncommit:
 682			btrfs_set_and_info(info, FLUSHONCOMMIT,
 683					   "turning on flush-on-commit");
 684			break;
 685		case Opt_noflushoncommit:
 686			btrfs_clear_and_info(info, FLUSHONCOMMIT,
 687					     "turning off flush-on-commit");
 688			break;
 689		case Opt_ratio:
 690			ret = match_int(&args[0], &intarg);
 691			if (ret)
 692				goto out;
 693			info->metadata_ratio = intarg;
 694			btrfs_info(info, "metadata ratio %u",
 695				   info->metadata_ratio);
 696			break;
 697		case Opt_discard:
 698			btrfs_set_and_info(info, DISCARD,
 699					   "turning on discard");
 700			break;
 701		case Opt_nodiscard:
 702			btrfs_clear_and_info(info, DISCARD,
 703					     "turning off discard");
 704			break;
 705		case Opt_space_cache:
 706		case Opt_space_cache_version:
 707			if (token == Opt_space_cache ||
 708			    strcmp(args[0].from, "v1") == 0) {
 709				btrfs_clear_opt(info->mount_opt,
 710						FREE_SPACE_TREE);
 711				btrfs_set_and_info(info, SPACE_CACHE,
 712					   "enabling disk space caching");
 713			} else if (strcmp(args[0].from, "v2") == 0) {
 714				btrfs_clear_opt(info->mount_opt,
 715						SPACE_CACHE);
 716				btrfs_set_and_info(info, FREE_SPACE_TREE,
 717						   "enabling free space tree");
 718			} else {
 719				ret = -EINVAL;
 720				goto out;
 721			}
 722			break;
 723		case Opt_rescan_uuid_tree:
 724			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 725			break;
 726		case Opt_no_space_cache:
 727			if (btrfs_test_opt(info, SPACE_CACHE)) {
 728				btrfs_clear_and_info(info, SPACE_CACHE,
 729					     "disabling disk space caching");
 730			}
 731			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
 732				btrfs_clear_and_info(info, FREE_SPACE_TREE,
 733					     "disabling free space tree");
 734			}
 735			break;
 736		case Opt_inode_cache:
 737			btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
 738					   "enabling inode map caching");
 739			break;
 740		case Opt_noinode_cache:
 741			btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
 742					     "disabling inode map caching");
 743			break;
 744		case Opt_clear_cache:
 745			btrfs_set_and_info(info, CLEAR_CACHE,
 746					   "force clearing of disk cache");
 747			break;
 748		case Opt_user_subvol_rm_allowed:
 749			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 750			break;
 751		case Opt_enospc_debug:
 752			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
 753			break;
 754		case Opt_noenospc_debug:
 755			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
 756			break;
 757		case Opt_defrag:
 758			btrfs_set_and_info(info, AUTO_DEFRAG,
 759					   "enabling auto defrag");
 760			break;
 761		case Opt_nodefrag:
 762			btrfs_clear_and_info(info, AUTO_DEFRAG,
 763					     "disabling auto defrag");
 764			break;
 765		case Opt_recovery:
 766			btrfs_warn(info,
 767				   "'recovery' is deprecated, use 'usebackuproot' instead");
 768			/* fall through */
 769		case Opt_usebackuproot:
 770			btrfs_info(info,
 771				   "trying to use backup root at mount time");
 772			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 773			break;
 774		case Opt_skip_balance:
 775			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
 776			break;
 777#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 778		case Opt_check_integrity_including_extent_data:
 779			btrfs_info(info,
 780				   "enabling check integrity including extent data");
 781			btrfs_set_opt(info->mount_opt,
 782				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
 783			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 784			break;
 785		case Opt_check_integrity:
 786			btrfs_info(info, "enabling check integrity");
 787			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 788			break;
 789		case Opt_check_integrity_print_mask:
 790			ret = match_int(&args[0], &intarg);
 791			if (ret)
 792				goto out;
 793			info->check_integrity_print_mask = intarg;
 794			btrfs_info(info, "check_integrity_print_mask 0x%x",
 795				   info->check_integrity_print_mask);
 796			break;
 797#else
 798		case Opt_check_integrity_including_extent_data:
 799		case Opt_check_integrity:
 800		case Opt_check_integrity_print_mask:
 801			btrfs_err(info,
 802				  "support for check_integrity* not compiled in!");
 803			ret = -EINVAL;
 804			goto out;
 805#endif
 806		case Opt_fatal_errors:
 807			if (strcmp(args[0].from, "panic") == 0)
 808				btrfs_set_opt(info->mount_opt,
 809					      PANIC_ON_FATAL_ERROR);
 810			else if (strcmp(args[0].from, "bug") == 0)
 811				btrfs_clear_opt(info->mount_opt,
 812					      PANIC_ON_FATAL_ERROR);
 813			else {
 814				ret = -EINVAL;
 815				goto out;
 816			}
 817			break;
 818		case Opt_commit_interval:
 819			intarg = 0;
 820			ret = match_int(&args[0], &intarg);
 821			if (ret)
 822				goto out;
 823			if (intarg == 0) {
 824				btrfs_info(info,
 825					   "using default commit interval %us",
 826					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 827				intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
 828			} else if (intarg > 300) {
 829				btrfs_warn(info, "excessive commit interval %d",
 830					   intarg);
 831			}
 832			info->commit_interval = intarg;
 833			break;
 834#ifdef CONFIG_BTRFS_DEBUG
 835		case Opt_fragment_all:
 836			btrfs_info(info, "fragmenting all space");
 837			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 838			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
 839			break;
 840		case Opt_fragment_metadata:
 841			btrfs_info(info, "fragmenting metadata");
 842			btrfs_set_opt(info->mount_opt,
 843				      FRAGMENT_METADATA);
 844			break;
 845		case Opt_fragment_data:
 846			btrfs_info(info, "fragmenting data");
 847			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 848			break;
 849#endif
 850#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 851		case Opt_ref_verify:
 852			btrfs_info(info, "doing ref verification");
 853			btrfs_set_opt(info->mount_opt, REF_VERIFY);
 854			break;
 855#endif
 856		case Opt_err:
 857			btrfs_info(info, "unrecognized mount option '%s'", p);
 858			ret = -EINVAL;
 859			goto out;
 860		default:
 861			break;
 862		}
 863	}
 864check:
 865	/*
 866	 * Extra check for current option against current flag
 867	 */
 868	if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
 869		btrfs_err(info,
 870			  "nologreplay must be used with ro mount option");
 871		ret = -EINVAL;
 872	}
 873out:
 874	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
 875	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
 876	    !btrfs_test_opt(info, CLEAR_CACHE)) {
 877		btrfs_err(info, "cannot disable free space tree");
 878		ret = -EINVAL;
 879
 880	}
 881	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
 882		btrfs_info(info, "disk space caching is enabled");
 883	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
 884		btrfs_info(info, "using free space tree");
 885	return ret;
 886}
 887
 888/*
 889 * Parse mount options that are required early in the mount process.
 890 *
 891 * All other options will be parsed much later in the mount process and
 892 * only when we need to allocate a new super block.
 893 */
 894static int btrfs_parse_device_options(const char *options, fmode_t flags,
 895				      void *holder)
 896{
 897	substring_t args[MAX_OPT_ARGS];
 898	char *device_name, *opts, *orig, *p;
 899	struct btrfs_device *device = NULL;
 900	int error = 0;
 901
 902	lockdep_assert_held(&uuid_mutex);
 903
 904	if (!options)
 905		return 0;
 906
 907	/*
 908	 * strsep changes the string, duplicate it because btrfs_parse_options
 909	 * gets called later
 910	 */
 911	opts = kstrdup(options, GFP_KERNEL);
 912	if (!opts)
 913		return -ENOMEM;
 914	orig = opts;
 915
 916	while ((p = strsep(&opts, ",")) != NULL) {
 917		int token;
 918
 919		if (!*p)
 920			continue;
 921
 922		token = match_token(p, tokens, args);
 923		if (token == Opt_device) {
 924			device_name = match_strdup(&args[0]);
 925			if (!device_name) {
 926				error = -ENOMEM;
 927				goto out;
 928			}
 929			device = btrfs_scan_one_device(device_name, flags,
 930					holder);
 931			kfree(device_name);
 932			if (IS_ERR(device)) {
 933				error = PTR_ERR(device);
 934				goto out;
 935			}
 936		}
 937	}
 938
 939out:
 940	kfree(orig);
 941	return error;
 942}
 943
 944/*
 945 * Parse mount options that are related to subvolume id
 946 *
 947 * The value is later passed to mount_subvol()
 948 */
 949static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
 950		u64 *subvol_objectid)
 951{
 952	substring_t args[MAX_OPT_ARGS];
 953	char *opts, *orig, *p;
 954	int error = 0;
 955	u64 subvolid;
 956
 957	if (!options)
 958		return 0;
 959
 960	/*
 961	 * strsep changes the string, duplicate it because
 962	 * btrfs_parse_device_options gets called later
 963	 */
 964	opts = kstrdup(options, GFP_KERNEL);
 965	if (!opts)
 966		return -ENOMEM;
 967	orig = opts;
 968
 969	while ((p = strsep(&opts, ",")) != NULL) {
 970		int token;
 971		if (!*p)
 972			continue;
 973
 974		token = match_token(p, tokens, args);
 975		switch (token) {
 976		case Opt_subvol:
 977			kfree(*subvol_name);
 978			*subvol_name = match_strdup(&args[0]);
 979			if (!*subvol_name) {
 980				error = -ENOMEM;
 981				goto out;
 982			}
 983			break;
 984		case Opt_subvolid:
 985			error = match_u64(&args[0], &subvolid);
 986			if (error)
 987				goto out;
 988
 989			/* we want the original fs_tree */
 990			if (subvolid == 0)
 991				subvolid = BTRFS_FS_TREE_OBJECTID;
 992
 993			*subvol_objectid = subvolid;
 994			break;
 995		case Opt_subvolrootid:
 996			pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
 997			break;
 998		default:
 999			break;
1000		}
1001	}
1002
1003out:
1004	kfree(orig);
1005	return error;
1006}
1007
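/*
 * Build the path of a subvolume from its objectid: walk up the root backrefs
 * in the tree of tree roots and the inode refs inside each fs tree, filling
 * the name buffer from the end.  The top-level subvolume resolves to "/".
 */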
1008static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1009					   u64 subvol_objectid)
1010{
1011	struct btrfs_root *root = fs_info->tree_root;
1012	struct btrfs_root *fs_root;
1013	struct btrfs_root_ref *root_ref;
1014	struct btrfs_inode_ref *inode_ref;
1015	struct btrfs_key key;
1016	struct btrfs_path *path = NULL;
1017	char *name = NULL, *ptr;
1018	u64 dirid;
1019	int len;
1020	int ret;
1021
1022	path = btrfs_alloc_path();
1023	if (!path) {
1024		ret = -ENOMEM;
1025		goto err;
1026	}
1027	path->leave_spinning = 1;
1028
1029	name = kmalloc(PATH_MAX, GFP_KERNEL);
1030	if (!name) {
1031		ret = -ENOMEM;
1032		goto err;
1033	}
1034	ptr = name + PATH_MAX - 1;
1035	ptr[0] = '\0';
1036
1037	/*
1038	 * Walk up the subvolume trees in the tree of tree roots by root
1039	 * backrefs until we hit the top-level subvolume.
1040	 */
1041	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1042		key.objectid = subvol_objectid;
1043		key.type = BTRFS_ROOT_BACKREF_KEY;
1044		key.offset = (u64)-1;
1045
1046		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1047		if (ret < 0) {
1048			goto err;
1049		} else if (ret > 0) {
1050			ret = btrfs_previous_item(root, path, subvol_objectid,
1051						  BTRFS_ROOT_BACKREF_KEY);
1052			if (ret < 0) {
1053				goto err;
1054			} else if (ret > 0) {
1055				ret = -ENOENT;
1056				goto err;
1057			}
1058		}
1059
1060		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1061		subvol_objectid = key.offset;
1062
1063		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1064					  struct btrfs_root_ref);
1065		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1066		ptr -= len + 1;
1067		if (ptr < name) {
1068			ret = -ENAMETOOLONG;
1069			goto err;
1070		}
1071		read_extent_buffer(path->nodes[0], ptr + 1,
1072				   (unsigned long)(root_ref + 1), len);
1073		ptr[0] = '/';
1074		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1075		btrfs_release_path(path);
1076
1077		key.objectid = subvol_objectid;
1078		key.type = BTRFS_ROOT_ITEM_KEY;
1079		key.offset = (u64)-1;
1080		fs_root = btrfs_read_fs_root_no_name(fs_info, &key);
1081		if (IS_ERR(fs_root)) {
1082			ret = PTR_ERR(fs_root);
1083			goto err;
1084		}
1085
1086		/*
1087		 * Walk up the filesystem tree by inode refs until we hit the
1088		 * root directory.
1089		 */
1090		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1091			key.objectid = dirid;
1092			key.type = BTRFS_INODE_REF_KEY;
1093			key.offset = (u64)-1;
1094
1095			ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1096			if (ret < 0) {
1097				goto err;
1098			} else if (ret > 0) {
1099				ret = btrfs_previous_item(fs_root, path, dirid,
1100							  BTRFS_INODE_REF_KEY);
1101				if (ret < 0) {
1102					goto err;
1103				} else if (ret > 0) {
1104					ret = -ENOENT;
1105					goto err;
1106				}
1107			}
1108
1109			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1110			dirid = key.offset;
1111
1112			inode_ref = btrfs_item_ptr(path->nodes[0],
1113						   path->slots[0],
1114						   struct btrfs_inode_ref);
1115			len = btrfs_inode_ref_name_len(path->nodes[0],
1116						       inode_ref);
1117			ptr -= len + 1;
1118			if (ptr < name) {
1119				ret = -ENAMETOOLONG;
1120				goto err;
1121			}
1122			read_extent_buffer(path->nodes[0], ptr + 1,
1123					   (unsigned long)(inode_ref + 1), len);
1124			ptr[0] = '/';
1125			btrfs_release_path(path);
1126		}
1127	}
1128
1129	btrfs_free_path(path);
1130	if (ptr == name + PATH_MAX - 1) {
1131		name[0] = '/';
1132		name[1] = '\0';
1133	} else {
1134		memmove(name, ptr, name + PATH_MAX - ptr);
1135	}
1136	return name;
1137
1138err:
1139	btrfs_free_path(path);
1140	kfree(name);
1141	return ERR_PTR(ret);
1142}
1143
1144static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1145{
1146	struct btrfs_root *root = fs_info->tree_root;
1147	struct btrfs_dir_item *di;
1148	struct btrfs_path *path;
1149	struct btrfs_key location;
1150	u64 dir_id;
1151
1152	path = btrfs_alloc_path();
1153	if (!path)
1154		return -ENOMEM;
1155	path->leave_spinning = 1;
1156
1157	/*
1158	 * Find the "default" dir item which points to the root item that we
1159	 * will mount by default if we haven't been given a specific subvolume
1160	 * to mount.
1161	 */
1162	dir_id = btrfs_super_root_dir(fs_info->super_copy);
1163	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1164	if (IS_ERR(di)) {
1165		btrfs_free_path(path);
1166		return PTR_ERR(di);
1167	}
1168	if (!di) {
1169		/*
1170		 * Ok the default dir item isn't there.  This is weird since
1171		 * it's always been there, but don't freak out, just try to
1172		 * mount the top-level subvolume.
1173		 */
1174		btrfs_free_path(path);
1175		*objectid = BTRFS_FS_TREE_OBJECTID;
1176		return 0;
1177	}
1178
1179	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1180	btrfs_free_path(path);
1181	*objectid = location.objectid;
1182	return 0;
1183}
1184
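/*
 * Fill in the VFS super_block: set up the operations, flags and bdi, read the
 * filesystem via open_ctree() and instantiate the root dentry from the root
 * inode of the fs tree (ino BTRFS_FIRST_FREE_OBJECTID).
 */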
1185static int btrfs_fill_super(struct super_block *sb,
1186			    struct btrfs_fs_devices *fs_devices,
1187			    void *data)
1188{
1189	struct inode *inode;
1190	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1191	struct btrfs_key key;
1192	int err;
1193
1194	sb->s_maxbytes = MAX_LFS_FILESIZE;
1195	sb->s_magic = BTRFS_SUPER_MAGIC;
1196	sb->s_op = &btrfs_super_ops;
1197	sb->s_d_op = &btrfs_dentry_operations;
1198	sb->s_export_op = &btrfs_export_ops;
1199	sb->s_xattr = btrfs_xattr_handlers;
1200	sb->s_time_gran = 1;
1201#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1202	sb->s_flags |= SB_POSIXACL;
1203#endif
1204	sb->s_flags |= SB_I_VERSION;
1205	sb->s_iflags |= SB_I_CGROUPWB;
1206
1207	err = super_setup_bdi(sb);
1208	if (err) {
1209		btrfs_err(fs_info, "super_setup_bdi failed");
1210		return err;
1211	}
1212
1213	err = open_ctree(sb, fs_devices, (char *)data);
1214	if (err) {
1215		btrfs_err(fs_info, "open_ctree failed");
1216		return err;
1217	}
1218
1219	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
1220	key.type = BTRFS_INODE_ITEM_KEY;
1221	key.offset = 0;
1222	inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
1223	if (IS_ERR(inode)) {
1224		err = PTR_ERR(inode);
1225		goto fail_close;
1226	}
1227
1228	sb->s_root = d_make_root(inode);
1229	if (!sb->s_root) {
1230		err = -ENOMEM;
1231		goto fail_close;
1232	}
1233
1234	cleancache_init_fs(sb);
1235	sb->s_flags |= SB_ACTIVE;
1236	return 0;
1237
1238fail_close:
1239	close_ctree(fs_info);
1240	return err;
1241}
1242
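/*
 * Sync callback for sync(2)/syncfs(2).  A non-waiting sync only flushes dirty
 * btree pages; a waiting sync waits for ordered extents and commits the
 * running transaction, starting one only if there are pending changes and the
 * filesystem is not frozen.
 */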
1243int btrfs_sync_fs(struct super_block *sb, int wait)
1244{
1245	struct btrfs_trans_handle *trans;
1246	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1247	struct btrfs_root *root = fs_info->tree_root;
1248
1249	trace_btrfs_sync_fs(fs_info, wait);
1250
1251	if (!wait) {
1252		filemap_flush(fs_info->btree_inode->i_mapping);
1253		return 0;
1254	}
1255
1256	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1257
1258	trans = btrfs_attach_transaction_barrier(root);
1259	if (IS_ERR(trans)) {
1260		/* no transaction, don't bother */
1261		if (PTR_ERR(trans) == -ENOENT) {
1262			/*
1263			 * Exit unless we have some pending changes
1264			 * that need to go through commit
1265			 */
1266			if (fs_info->pending_changes == 0)
1267				return 0;
1268			/*
1269			 * A non-blocking test if the fs is frozen. We must not
1270			 * start a new transaction here otherwise a deadlock
1271			 * happens. The pending operations are delayed to the
1272			 * next commit after thawing.
1273			 */
1274			if (sb_start_write_trylock(sb))
1275				sb_end_write(sb);
1276			else
1277				return 0;
1278			trans = btrfs_start_transaction(root, 0);
1279		}
1280		if (IS_ERR(trans))
1281			return PTR_ERR(trans);
1282	}
1283	return btrfs_commit_transaction(trans);
1284}
1285
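/*
 * Print the mount options currently in effect for /proc/mounts; keep this in
 * sync with the option parsing above.
 */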
1286static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1287{
1288	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1289	const char *compress_type;
1290
1291	if (btrfs_test_opt(info, DEGRADED))
1292		seq_puts(seq, ",degraded");
1293	if (btrfs_test_opt(info, NODATASUM))
1294		seq_puts(seq, ",nodatasum");
1295	if (btrfs_test_opt(info, NODATACOW))
1296		seq_puts(seq, ",nodatacow");
1297	if (btrfs_test_opt(info, NOBARRIER))
1298		seq_puts(seq, ",nobarrier");
1299	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1300		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1301	if (info->thread_pool_size !=  min_t(unsigned long,
1302					     num_online_cpus() + 2, 8))
1303		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1304	if (btrfs_test_opt(info, COMPRESS)) {
1305		compress_type = btrfs_compress_type2str(info->compress_type);
1306		if (btrfs_test_opt(info, FORCE_COMPRESS))
1307			seq_printf(seq, ",compress-force=%s", compress_type);
1308		else
1309			seq_printf(seq, ",compress=%s", compress_type);
1310		if (info->compress_level)
1311			seq_printf(seq, ":%d", info->compress_level);
1312	}
1313	if (btrfs_test_opt(info, NOSSD))
1314		seq_puts(seq, ",nossd");
1315	if (btrfs_test_opt(info, SSD_SPREAD))
1316		seq_puts(seq, ",ssd_spread");
1317	else if (btrfs_test_opt(info, SSD))
1318		seq_puts(seq, ",ssd");
1319	if (btrfs_test_opt(info, NOTREELOG))
1320		seq_puts(seq, ",notreelog");
1321	if (btrfs_test_opt(info, NOLOGREPLAY))
1322		seq_puts(seq, ",nologreplay");
1323	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1324		seq_puts(seq, ",flushoncommit");
1325	if (btrfs_test_opt(info, DISCARD))
1326		seq_puts(seq, ",discard");
1327	if (!(info->sb->s_flags & SB_POSIXACL))
1328		seq_puts(seq, ",noacl");
1329	if (btrfs_test_opt(info, SPACE_CACHE))
1330		seq_puts(seq, ",space_cache");
1331	else if (btrfs_test_opt(info, FREE_SPACE_TREE))
1332		seq_puts(seq, ",space_cache=v2");
1333	else
1334		seq_puts(seq, ",nospace_cache");
1335	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1336		seq_puts(seq, ",rescan_uuid_tree");
1337	if (btrfs_test_opt(info, CLEAR_CACHE))
1338		seq_puts(seq, ",clear_cache");
1339	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1340		seq_puts(seq, ",user_subvol_rm_allowed");
1341	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1342		seq_puts(seq, ",enospc_debug");
1343	if (btrfs_test_opt(info, AUTO_DEFRAG))
1344		seq_puts(seq, ",autodefrag");
1345	if (btrfs_test_opt(info, INODE_MAP_CACHE))
1346		seq_puts(seq, ",inode_cache");
1347	if (btrfs_test_opt(info, SKIP_BALANCE))
1348		seq_puts(seq, ",skip_balance");
1349#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1350	if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
1351		seq_puts(seq, ",check_int_data");
1352	else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1353		seq_puts(seq, ",check_int");
1354	if (info->check_integrity_print_mask)
1355		seq_printf(seq, ",check_int_print_mask=%d",
1356				info->check_integrity_print_mask);
1357#endif
1358	if (info->metadata_ratio)
1359		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1360	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1361		seq_puts(seq, ",fatal_errors=panic");
1362	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1363		seq_printf(seq, ",commit=%u", info->commit_interval);
1364#ifdef CONFIG_BTRFS_DEBUG
1365	if (btrfs_test_opt(info, FRAGMENT_DATA))
1366		seq_puts(seq, ",fragment=data");
1367	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1368		seq_puts(seq, ",fragment=metadata");
1369#endif
1370	if (btrfs_test_opt(info, REF_VERIFY))
1371		seq_puts(seq, ",ref_verify");
1372	seq_printf(seq, ",subvolid=%llu",
1373		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1374	seq_puts(seq, ",subvol=");
1375	seq_dentry(seq, dentry, " \t\n\\");
1376	return 0;
1377}
1378
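/*
 * sget() match callback: two mounts share a superblock iff they refer to the
 * same set of member devices (the same btrfs_fs_devices).
 */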
1379static int btrfs_test_super(struct super_block *s, void *data)
1380{
1381	struct btrfs_fs_info *p = data;
1382	struct btrfs_fs_info *fs_info = btrfs_sb(s);
1383
1384	return fs_info->fs_devices == p->fs_devices;
1385}
1386
1387static int btrfs_set_super(struct super_block *s, void *data)
1388{
1389	int err = set_anon_super(s, data);
1390	if (!err)
1391		s->s_fs_info = data;
1392	return err;
1393}
1394
1395/*
1396 * Subvolumes are identified by ino 256 (BTRFS_FIRST_FREE_OBJECTID).
1397 */
1398static inline int is_subvolume_inode(struct inode *inode)
1399{
1400	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1401		return 1;
1402	return 0;
1403}
1404
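/*
 * Resolve the subvolume to mount (by name, by id, or the default subvolume),
 * graft it with mount_subtree() and verify that the resulting dentry really
 * is a subvolume root that matches any requested subvolid.
 */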
1405static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1406				   struct vfsmount *mnt)
1407{
1408	struct dentry *root;
1409	int ret;
1410
1411	if (!subvol_name) {
1412		if (!subvol_objectid) {
1413			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1414							  &subvol_objectid);
1415			if (ret) {
1416				root = ERR_PTR(ret);
1417				goto out;
1418			}
1419		}
1420		subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
1421							    subvol_objectid);
1422		if (IS_ERR(subvol_name)) {
1423			root = ERR_CAST(subvol_name);
1424			subvol_name = NULL;
1425			goto out;
1426		}
1427
1428	}
1429
1430	root = mount_subtree(mnt, subvol_name);
1431	/* mount_subtree() drops our reference on the vfsmount. */
1432	mnt = NULL;
1433
1434	if (!IS_ERR(root)) {
1435		struct super_block *s = root->d_sb;
1436		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1437		struct inode *root_inode = d_inode(root);
1438		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1439
1440		ret = 0;
1441		if (!is_subvolume_inode(root_inode)) {
1442			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1443			       subvol_name);
1444			ret = -EINVAL;
1445		}
1446		if (subvol_objectid && root_objectid != subvol_objectid) {
1447			/*
1448			 * This will also catch a race condition where a
1449			 * subvolume which was passed by ID is renamed and
1450			 * another subvolume is renamed over the old location.
1451			 */
1452			btrfs_err(fs_info,
1453				  "subvol '%s' does not match subvolid %llu",
1454				  subvol_name, subvol_objectid);
1455			ret = -EINVAL;
1456		}
1457		if (ret) {
1458			dput(root);
1459			root = ERR_PTR(ret);
1460			deactivate_locked_super(s);
1461		}
1462	}
1463
1464out:
1465	mntput(mnt);
1466	kfree(subvol_name);
1467	return root;
1468}
1469
1470/*
1471 * Find a superblock for the given device / mount point.
1472 *
1473 * Note: This is based on mount_bdev from fs/super.c with a few additions
1474 *       for multiple device setup.  Make sure to keep it in sync.
1475 */
1476static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1477		int flags, const char *device_name, void *data)
1478{
1479	struct block_device *bdev = NULL;
1480	struct super_block *s;
1481	struct btrfs_device *device = NULL;
1482	struct btrfs_fs_devices *fs_devices = NULL;
1483	struct btrfs_fs_info *fs_info = NULL;
1484	void *new_sec_opts = NULL;
1485	fmode_t mode = FMODE_READ;
1486	int error = 0;
1487
1488	if (!(flags & SB_RDONLY))
1489		mode |= FMODE_WRITE;
1490
1491	if (data) {
1492		error = security_sb_eat_lsm_opts(data, &new_sec_opts);
1493		if (error)
1494			return ERR_PTR(error);
1495	}
1496
1497	/*
1498	 * Set up a dummy root and fs_info for test/set super.  This is because
1499	 * we don't actually fill this stuff out until open_ctree, but we need
1500	 * it for searching for existing supers, so this lets us do that and
1501	 * then open_ctree will properly initialize everything later.
1502	 */
1503	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1504	if (!fs_info) {
1505		error = -ENOMEM;
1506		goto error_sec_opts;
1507	}
1508
1509	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1510	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1511	if (!fs_info->super_copy || !fs_info->super_for_commit) {
1512		error = -ENOMEM;
1513		goto error_fs_info;
1514	}
1515
1516	mutex_lock(&uuid_mutex);
1517	error = btrfs_parse_device_options(data, mode, fs_type);
1518	if (error) {
1519		mutex_unlock(&uuid_mutex);
1520		goto error_fs_info;
1521	}
1522
1523	device = btrfs_scan_one_device(device_name, mode, fs_type);
1524	if (IS_ERR(device)) {
1525		mutex_unlock(&uuid_mutex);
1526		error = PTR_ERR(device);
1527		goto error_fs_info;
1528	}
1529
1530	fs_devices = device->fs_devices;
1531	fs_info->fs_devices = fs_devices;
1532
1533	error = btrfs_open_devices(fs_devices, mode, fs_type);
1534	mutex_unlock(&uuid_mutex);
1535	if (error)
1536		goto error_fs_info;
1537
1538	if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1539		error = -EACCES;
1540		goto error_close_devices;
1541	}
1542
1543	bdev = fs_devices->latest_bdev;
1544	s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1545		 fs_info);
1546	if (IS_ERR(s)) {
1547		error = PTR_ERR(s);
1548		goto error_close_devices;
1549	}
1550
1551	if (s->s_root) {
1552		btrfs_close_devices(fs_devices);
1553		free_fs_info(fs_info);
1554		if ((flags ^ s->s_flags) & SB_RDONLY)
1555			error = -EBUSY;
1556	} else {
1557		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1558		btrfs_sb(s)->bdev_holder = fs_type;
1559		if (!strstr(crc32c_impl(), "generic"))
1560			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1561		error = btrfs_fill_super(s, fs_devices, data);
1562	}
1563	if (!error)
1564		error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
1565	security_free_mnt_opts(&new_sec_opts);
1566	if (error) {
1567		deactivate_locked_super(s);
1568		return ERR_PTR(error);
1569	}
1570
1571	return dget(s->s_root);
1572
1573error_close_devices:
1574	btrfs_close_devices(fs_devices);
1575error_fs_info:
1576	free_fs_info(fs_info);
1577error_sec_opts:
1578	security_free_mnt_opts(&new_sec_opts);
1579	return ERR_PTR(error);
1580}
1581
1582/*
1583 * Mount function which is called by VFS layer.
1584 *
1585 * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
1586 * which needs the vfsmount of the device's root (/).  This means the device's
1587 * root has to be mounted internally in any case.
1588 *
1589 * Operation flow:
1590 *   1. Parse subvol id related options for later use in mount_subvol().
1591 *
1592 *   2. Mount device's root (/) by calling vfs_kern_mount().
1593 *
1594 *      NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1595 *      first place. In order to avoid calling btrfs_mount() again, we use
1596 *      different file_system_type which is not registered to VFS by
1597 *      register_filesystem() (btrfs_root_fs_type). As a result,
1598 *      btrfs_mount_root() is called. The return value will be used by
1599 *      mount_subtree() in mount_subvol().
1600 *
1601 *   3. Call mount_subvol() to get the dentry of the subvolume. Since
1602 *      "btrfs subvolume set-default" exists, mount_subvol() is always called.
1603 */
1604static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1605		const char *device_name, void *data)
1606{
1607	struct vfsmount *mnt_root;
1608	struct dentry *root;
1609	char *subvol_name = NULL;
1610	u64 subvol_objectid = 0;
1611	int error = 0;
1612
1613	error = btrfs_parse_subvol_options(data, &subvol_name,
1614					&subvol_objectid);
1615	if (error) {
1616		kfree(subvol_name);
1617		return ERR_PTR(error);
1618	}
1619
1620	/* mount device's root (/) */
1621	mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
1622	if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1623		if (flags & SB_RDONLY) {
1624			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1625				flags & ~SB_RDONLY, device_name, data);
1626		} else {
1627			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1628				flags | SB_RDONLY, device_name, data);
1629			if (IS_ERR(mnt_root)) {
1630				root = ERR_CAST(mnt_root);
1631				kfree(subvol_name);
1632				goto out;
1633			}
1634
1635			down_write(&mnt_root->mnt_sb->s_umount);
1636			error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1637			up_write(&mnt_root->mnt_sb->s_umount);
1638			if (error < 0) {
1639				root = ERR_PTR(error);
1640				mntput(mnt_root);
1641				kfree(subvol_name);
1642				goto out;
1643			}
1644		}
1645	}
1646	if (IS_ERR(mnt_root)) {
1647		root = ERR_CAST(mnt_root);
1648		kfree(subvol_name);
1649		goto out;
1650	}
1651
1652	/* mount_subvol() will free subvol_name and mnt_root */
1653	root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
1654
1655out:
1656	return root;
1657}
1658
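/* Apply a new thread_pool= value to all resizable workqueues. */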
1659static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1660				     u32 new_pool_size, u32 old_pool_size)
1661{
1662	if (new_pool_size == old_pool_size)
1663		return;
1664
1665	fs_info->thread_pool_size = new_pool_size;
1666
1667	btrfs_info(fs_info, "resize thread pool %d -> %d",
1668	       old_pool_size, new_pool_size);
1669
1670	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1671	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1672	btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
1673	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1674	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
1675	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
1676	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
1677				new_pool_size);
1678	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1679	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1680	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1681	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
1682	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
1683				new_pool_size);
1684}
1685
1686static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
1687{
1688	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1689}
1690
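/*
 * When autodefrag is being switched off or the filesystem goes read-only,
 * wait for any running defraggers and sync before the remount proceeds.
 */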
1691static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1692				       unsigned long old_opts, int flags)
1693{
1694	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1695	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1696	     (flags & SB_RDONLY))) {
1697		/* wait for any defraggers to finish */
1698		wait_event(fs_info->transaction_wait,
1699			   (atomic_read(&fs_info->defrag_running) == 0));
1700		if (flags & SB_RDONLY)
1701			sync_filesystem(fs_info->sb);
1702	}
1703}
1704
1705static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1706					 unsigned long old_opts)
1707{
1708	/*
1709	 * We need to clean up all defraggable inodes if auto defragmentation has
1710	 * been turned off or the filesystem is now read-only.
1711	 */
1712	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1713	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1714		btrfs_cleanup_defrag_inodes(fs_info);
1715	}
1716
1717	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1718}
1719
1720static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1721{
1722	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1723	struct btrfs_root *root = fs_info->tree_root;
1724	unsigned old_flags = sb->s_flags;
1725	unsigned long old_opts = fs_info->mount_opt;
1726	unsigned long old_compress_type = fs_info->compress_type;
1727	u64 old_max_inline = fs_info->max_inline;
1728	u32 old_thread_pool_size = fs_info->thread_pool_size;
1729	u32 old_metadata_ratio = fs_info->metadata_ratio;
1730	int ret;
1731
1732	sync_filesystem(sb);
1733	btrfs_remount_prepare(fs_info);
1734
1735	if (data) {
1736		void *new_sec_opts = NULL;
1737
1738		ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
1739		if (!ret)
1740			ret = security_sb_remount(sb, new_sec_opts);
1741		security_free_mnt_opts(&new_sec_opts);
1742		if (ret)
1743			goto restore;
1744	}
1745
1746	ret = btrfs_parse_options(fs_info, data, *flags);
1747	if (ret)
1748		goto restore;
1749
1750	btrfs_remount_begin(fs_info, old_opts, *flags);
1751	btrfs_resize_thread_pool(fs_info,
1752		fs_info->thread_pool_size, old_thread_pool_size);
1753
1754	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1755		goto out;
1756
1757	if (*flags & SB_RDONLY) {
1758		/*
1759		 * this also happens on 'umount -rf' or on shutdown, when
1760		 * the filesystem is busy.
1761		 */
1762		cancel_work_sync(&fs_info->async_reclaim_work);
1763
1764		/* wait for the uuid_scan task to finish */
1765		down(&fs_info->uuid_tree_rescan_sem);
1766		/* avoid complaints from lockdep et al. */
1767		up(&fs_info->uuid_tree_rescan_sem);
1768
1769		sb->s_flags |= SB_RDONLY;
1770
1771		/*
1772		 * Setting SB_RDONLY will put the cleaner thread to
1773		 * sleep at the next loop if it's already active.
1774		 * If it's already asleep, we'll leave unused block
1775		 * groups on disk until we're mounted read-write again
1776		 * unless we clean them up here.
1777		 */
1778		btrfs_delete_unused_bgs(fs_info);
1779
1780		btrfs_dev_replace_suspend_for_unmount(fs_info);
1781		btrfs_scrub_cancel(fs_info);
1782		btrfs_pause_balance(fs_info);
1783
1784		ret = btrfs_commit_super(fs_info);
1785		if (ret)
1786			goto restore;
1787	} else {
1788		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1789			btrfs_err(fs_info,
1790				"Remounting read-write after error is not allowed");
1791			ret = -EINVAL;
1792			goto restore;
1793		}
1794		if (fs_info->fs_devices->rw_devices == 0) {
1795			ret = -EACCES;
1796			goto restore;
1797		}
1798
1799		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1800			btrfs_warn(fs_info,
1801		"too many missing devices, writable remount is not allowed");
1802			ret = -EACCES;
1803			goto restore;
1804		}
1805
1806		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
1807			ret = -EINVAL;
1808			goto restore;
1809		}
1810
1811		ret = btrfs_cleanup_fs_roots(fs_info);
1812		if (ret)
1813			goto restore;
1814
1815		/* recover relocation */
1816		mutex_lock(&fs_info->cleaner_mutex);
1817		ret = btrfs_recover_relocation(root);
1818		mutex_unlock(&fs_info->cleaner_mutex);
1819		if (ret)
1820			goto restore;
1821
1822		ret = btrfs_resume_balance_async(fs_info);
1823		if (ret)
1824			goto restore;
1825
1826		ret = btrfs_resume_dev_replace_async(fs_info);
1827		if (ret) {
1828			btrfs_warn(fs_info, "failed to resume dev_replace");
1829			goto restore;
1830		}
1831
1832		btrfs_qgroup_rescan_resume(fs_info);
1833
1834		if (!fs_info->uuid_root) {
1835			btrfs_info(fs_info, "creating UUID tree");
1836			ret = btrfs_create_uuid_tree(fs_info);
1837			if (ret) {
1838				btrfs_warn(fs_info,
1839					   "failed to create the UUID tree %d",
1840					   ret);
1841				goto restore;
1842			}
1843		}
1844		sb->s_flags &= ~SB_RDONLY;
1845
1846		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1847	}
1848out:
1849	wake_up_process(fs_info->transaction_kthread);
1850	btrfs_remount_cleanup(fs_info, old_opts);
1851	return 0;
1852
1853restore:
1854	/* We've hit an error - don't reset SB_RDONLY */
1855	if (sb_rdonly(sb))
1856		old_flags |= SB_RDONLY;
1857	sb->s_flags = old_flags;
1858	fs_info->mount_opt = old_opts;
1859	fs_info->compress_type = old_compress_type;
1860	fs_info->max_inline = old_max_inline;
1861	btrfs_resize_thread_pool(fs_info,
1862		old_thread_pool_size, fs_info->thread_pool_size);
1863	fs_info->metadata_ratio = old_metadata_ratio;
1864	btrfs_remount_cleanup(fs_info, old_opts);
1865	return ret;
1866}
1867
1868/* Used to sort the devices by max_avail (descending sort) */
1869static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
1870				       const void *dev_info2)
1871{
1872	if (((struct btrfs_device_info *)dev_info1)->max_avail >
1873	    ((struct btrfs_device_info *)dev_info2)->max_avail)
1874		return -1;
1875	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
1876		 ((struct btrfs_device_info *)dev_info2)->max_avail)
1877		return 1;
1878	else
1879		return 0;
1880}
1881
1882/*
1883 * Sort the devices by max_avail, in which the max free extent size of each
1884 * device is stored (descending sort).
1885 */
1886static inline void btrfs_descending_sort_devices(
1887					struct btrfs_device_info *devices,
1888					size_t nr_devices)
1889{
1890	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
1891	     btrfs_cmp_device_free_bytes, NULL);
1892}
1893
1894/*
1895 * The helper to calculate the free space on the devices that can be used to
1896 * store file data.
1897 */
1898static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1899					      u64 *free_bytes)
1900{
1901	struct btrfs_device_info *devices_info;
1902	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1903	struct btrfs_device *device;
1904	u64 type;
1905	u64 avail_space;
1906	u64 min_stripe_size;
1907	int num_stripes = 1;
1908	int i = 0, nr_devices;
1909	const struct btrfs_raid_attr *rattr;
1910
1911	/*
1912	 * We aren't under the device list lock, so this is racy-ish, but good
1913	 * enough for our purposes.
1914	 */
1915	nr_devices = fs_info->fs_devices->open_devices;
1916	if (!nr_devices) {
1917		smp_mb();
1918		nr_devices = fs_info->fs_devices->open_devices;
1919		ASSERT(nr_devices);
1920		if (!nr_devices) {
1921			*free_bytes = 0;
1922			return 0;
1923		}
1924	}
1925
1926	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1927			       GFP_KERNEL);
1928	if (!devices_info)
1929		return -ENOMEM;
1930
1931	/* calc min stripe number for data space allocation */
1932	type = btrfs_data_alloc_profile(fs_info);
1933	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
1934
1935	if (type & BTRFS_BLOCK_GROUP_RAID0)
1936		num_stripes = nr_devices;
1937	else if (type & BTRFS_BLOCK_GROUP_RAID1)
1938		num_stripes = 2;
1939	else if (type & BTRFS_BLOCK_GROUP_RAID10)
1940		num_stripes = 4;
1941
1942	/* Adjust for more than 1 stripe per device */
1943	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
1944
1945	rcu_read_lock();
1946	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1947		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1948						&device->dev_state) ||
1949		    !device->bdev ||
1950		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
1951			continue;
1952
1953		if (i >= nr_devices)
1954			break;
1955
1956		avail_space = device->total_bytes - device->bytes_used;
1957
1958		/* align with stripe_len */
1959		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
1960
1961		/*
1962		 * In order to avoid overwriting the superblock on the drive,
1963		 * btrfs starts at an offset of at least 1MB when doing chunk
1964		 * allocation.
1965		 *
1966		 * This ensures we have at least min_stripe_size free space
1967		 * after excluding 1MB.
1968		 */
1969		if (avail_space <= SZ_1M + min_stripe_size)
1970			continue;
1971
1972		avail_space -= SZ_1M;
1973
1974		devices_info[i].dev = device;
1975		devices_info[i].max_avail = avail_space;
1976
1977		i++;
1978	}
1979	rcu_read_unlock();
1980
1981	nr_devices = i;
1982
1983	btrfs_descending_sort_devices(devices_info, nr_devices);
1984
1985	i = nr_devices - 1;
1986	avail_space = 0;
1987	while (nr_devices >= rattr->devs_min) {
1988		num_stripes = min(num_stripes, nr_devices);
1989
1990		if (devices_info[i].max_avail >= min_stripe_size) {
1991			int j;
1992			u64 alloc_size;
1993
1994			avail_space += devices_info[i].max_avail * num_stripes;
1995			alloc_size = devices_info[i].max_avail;
1996			for (j = i + 1 - num_stripes; j <= i; j++)
1997				devices_info[j].max_avail -= alloc_size;
1998		}
1999		i--;
2000		nr_devices--;
2001	}
2002
2003	kfree(devices_info);
2004	*free_bytes = avail_space;
2005	return 0;
2006}
2007
2008/*
2009 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2010 *
2011 * If there's a redundant raid level at DATA block groups, use the respective
2012 * multiplier to scale the sizes.
2013 *
2014 * Unused device space usage is based on simulating the chunk allocator
2015 * algorithm that respects the device sizes and order of allocations.  This is
2016 * a close approximation of the actual use but there are other factors that may
2017 * change the result (like a new metadata chunk).
2018 *
2019 * If metadata is exhausted, f_bavail will be 0.
2020 */
2021static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2022{
2023	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2024	struct btrfs_super_block *disk_super = fs_info->super_copy;
2025	struct list_head *head = &fs_info->space_info;
2026	struct btrfs_space_info *found;
2027	u64 total_used = 0;
2028	u64 total_free_data = 0;
2029	u64 total_free_meta = 0;
2030	int bits = dentry->d_sb->s_blocksize_bits;
2031	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
2032	unsigned factor = 1;
2033	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2034	int ret;
2035	u64 thresh = 0;
2036	int mixed = 0;
2037
2038	rcu_read_lock();
2039	list_for_each_entry_rcu(found, head, list) {
2040		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2041			int i;
2042
2043			total_free_data += found->disk_total - found->disk_used;
2044			total_free_data -=
2045				btrfs_account_ro_block_groups_free_space(found);
2046
2047			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2048				if (!list_empty(&found->block_groups[i]))
2049					factor = btrfs_bg_type_to_factor(
2050						btrfs_raid_array[i].bg_flag);
2051			}
2052		}
2053
2054		/*
2055		 * Metadata in mixed block group profiles is accounted in data
2056		 */
2057		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2058			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2059				mixed = 1;
2060			else
2061				total_free_meta += found->disk_total -
2062					found->disk_used;
2063		}
2064
2065		total_used += found->disk_used;
2066	}
2067
2068	rcu_read_unlock();
2069
2070	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2071	buf->f_blocks >>= bits;
2072	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2073
2074	/* Account global block reserve as used, it's in logical size already */
2075	spin_lock(&block_rsv->lock);
2076	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
2077	if (buf->f_bfree >= block_rsv->size >> bits)
2078		buf->f_bfree -= block_rsv->size >> bits;
2079	else
2080		buf->f_bfree = 0;
2081	spin_unlock(&block_rsv->lock);
2082
2083	buf->f_bavail = div_u64(total_free_data, factor);
2084	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2085	if (ret)
2086		return ret;
2087	buf->f_bavail += div_u64(total_free_data, factor);
2088	buf->f_bavail = buf->f_bavail >> bits;
2089
2090	/*
2091	 * We calculate the remaining metadata space minus global reserve. If
2092	 * this is (supposedly) smaller than zero, there's no space. But this
2093	 * does not hold in practice, the exhausted state happens where there's still
2094	 * some positive delta. So we apply some guesswork and compare the
2095	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
2096	 *
2097	 * We probably cannot calculate the exact threshold value because this
2098	 * depends on the internal reservations requested by various
2099	 * operations, so some operations that consume only a little metadata will
2100	 * succeed even if the Avail is zero. But this is better than the other
2101	 * way around.
2102	 */
2103	thresh = SZ_4M;
2104
2105	if (!mixed && total_free_meta - thresh < block_rsv->size)
2106		buf->f_bavail = 0;
2107
2108	buf->f_type = BTRFS_SUPER_MAGIC;
2109	buf->f_bsize = dentry->d_sb->s_blocksize;
2110	buf->f_namelen = BTRFS_NAME_LEN;
2111
2112	/* We treat it as constant endianness (it doesn't matter _which_)
2113	   because we want the fsid to come out the same whether mounted
2114	   on a big-endian or little-endian host */
2115	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2116	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2117	/* Mask in the root object ID too, to disambiguate subvols */
2118	buf->f_fsid.val[0] ^=
2119		BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
2120	buf->f_fsid.val[1] ^=
2121		BTRFS_I(d_inode(dentry))->root->root_key.objectid;
2122
2123	return 0;
2124}
2125
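/*
 * Tear down the anonymous superblock and free the fs_info that was allocated
 * in btrfs_mount_root().
 */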
2126static void btrfs_kill_super(struct super_block *sb)
2127{
2128	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2129	kill_anon_super(sb);
2130	free_fs_info(fs_info);
2131}
2132
2133static struct file_system_type btrfs_fs_type = {
2134	.owner		= THIS_MODULE,
2135	.name		= "btrfs",
2136	.mount		= btrfs_mount,
2137	.kill_sb	= btrfs_kill_super,
2138	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2139};
2140
2141static struct file_system_type btrfs_root_fs_type = {
2142	.owner		= THIS_MODULE,
2143	.name		= "btrfs",
2144	.mount		= btrfs_mount_root,
2145	.kill_sb	= btrfs_kill_super,
2146	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2147};
2148
2149MODULE_ALIAS_FS("btrfs");
2150
2151static int btrfs_control_open(struct inode *inode, struct file *file)
2152{
2153	/*
2154	 * The control file's private_data is used to hold the
2155	 * transaction when it is started and is used to keep
2156	 * track of whether a transaction is already in progress.
2157	 */
2158	file->private_data = NULL;
2159	return 0;
2160}
2161
2162/*
2163 * used by btrfsctl to scan devices when no FS is mounted
2164 */
2165static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2166				unsigned long arg)
2167{
2168	struct btrfs_ioctl_vol_args *vol;
2169	struct btrfs_device *device = NULL;
2170	int ret = -ENOTTY;
2171
2172	if (!capable(CAP_SYS_ADMIN))
2173		return -EPERM;
2174
2175	vol = memdup_user((void __user *)arg, sizeof(*vol));
2176	if (IS_ERR(vol))
2177		return PTR_ERR(vol);
2178	vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2179
2180	switch (cmd) {
2181	case BTRFS_IOC_SCAN_DEV:
2182		mutex_lock(&uuid_mutex);
2183		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2184					       &btrfs_root_fs_type);
2185		ret = PTR_ERR_OR_ZERO(device);
2186		mutex_unlock(&uuid_mutex);
2187		break;
2188	case BTRFS_IOC_FORGET_DEV:
2189		ret = btrfs_forget_devices(vol->name);
2190		break;
2191	case BTRFS_IOC_DEVICES_READY:
2192		mutex_lock(&uuid_mutex);
2193		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2194					       &btrfs_root_fs_type);
2195		if (IS_ERR(device)) {
2196			mutex_unlock(&uuid_mutex);
2197			ret = PTR_ERR(device);
2198			break;
2199		}
2200		ret = !(device->fs_devices->num_devices ==
2201			device->fs_devices->total_devices);
2202		mutex_unlock(&uuid_mutex);
2203		break;
2204	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2205		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2206		break;
2207	}
2208
2209	kfree(vol);
2210	return ret;
2211}
2212
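/*
 * Freeze callback: commit any transaction in progress so the on-disk state is
 * consistent while the filesystem stays frozen.
 */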
2213static int btrfs_freeze(struct super_block *sb)
2214{
2215	struct btrfs_trans_handle *trans;
2216	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2217	struct btrfs_root *root = fs_info->tree_root;
2218
2219	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2220	/*
2221	 * We don't need a barrier here, we'll wait for any transaction that
2222	 * could be in progress on other threads (and do delayed iputs that
2223	 * we want to avoid on a frozen filesystem), or do the commit
2224	 * ourselves.
2225	 */
2226	trans = btrfs_attach_transaction_barrier(root);
2227	if (IS_ERR(trans)) {
2228		/* no transaction, don't bother */
2229		if (PTR_ERR(trans) == -ENOENT)
2230			return 0;
2231		return PTR_ERR(trans);
2232	}
2233	return btrfs_commit_transaction(trans);
2234}
2235
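/* Thaw callback: clear the frozen state so new transactions can start again. */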
2236static int btrfs_unfreeze(struct super_block *sb)
2237{
2238	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2239
2240	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2241	return 0;
2242}
2243
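/*
 * Show the member device with the lowest devid as the mount source in
 * /proc/mounts, skipping missing devices and scanning seed devices too.
 */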
2244static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2245{
2246	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2247	struct btrfs_fs_devices *cur_devices;
2248	struct btrfs_device *dev, *first_dev = NULL;
2249	struct list_head *head;
2250
2251	/*
2252	 * Lightweight locking of the devices. We should not need
2253	 * device_list_mutex here as we only read the device data and the list
2254	 * is protected by RCU.  Even if a device is deleted during the list
2255	 * traversal, we'll still get valid data; the freeing callback will wait
2256	 * at least until the rcu_read_unlock.
2257	 */
2258	rcu_read_lock();
2259	cur_devices = fs_info->fs_devices;
2260	while (cur_devices) {
2261		head = &cur_devices->devices;
2262		list_for_each_entry_rcu(dev, head, dev_list) {
2263			if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2264				continue;
2265			if (!dev->name)
2266				continue;
2267			if (!first_dev || dev->devid < first_dev->devid)
2268				first_dev = dev;
2269		}
2270		cur_devices = cur_devices->seed;
2271	}
2272
2273	if (first_dev)
2274		seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
2275	else
2276		WARN_ON(1);
2277	rcu_read_unlock();
2278	return 0;
2279}
2280
2281static const struct super_operations btrfs_super_ops = {
2282	.drop_inode	= btrfs_drop_inode,
2283	.evict_inode	= btrfs_evict_inode,
2284	.put_super	= btrfs_put_super,
2285	.sync_fs	= btrfs_sync_fs,
2286	.show_options	= btrfs_show_options,
2287	.show_devname	= btrfs_show_devname,
2288	.alloc_inode	= btrfs_alloc_inode,
2289	.destroy_inode	= btrfs_destroy_inode,
2290	.free_inode	= btrfs_free_inode,
2291	.statfs		= btrfs_statfs,
2292	.remount_fs	= btrfs_remount,
2293	.freeze_fs	= btrfs_freeze,
2294	.unfreeze_fs	= btrfs_unfreeze,
2295};
2296
2297static const struct file_operations btrfs_ctl_fops = {
2298	.open = btrfs_control_open,
2299	.unlocked_ioctl	 = btrfs_control_ioctl,
2300	.compat_ioctl = btrfs_control_ioctl,
2301	.owner	 = THIS_MODULE,
2302	.llseek = noop_llseek,
2303};
2304
2305static struct miscdevice btrfs_misc = {
2306	.minor		= BTRFS_MINOR,
2307	.name		= "btrfs-control",
2308	.fops		= &btrfs_ctl_fops
2309};
2310
2311MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2312MODULE_ALIAS("devname:btrfs-control");
2313
2314static int __init btrfs_interface_init(void)
2315{
2316	return misc_register(&btrfs_misc);
2317}
2318
2319static __cold void btrfs_interface_exit(void)
2320{
2321	misc_deregister(&btrfs_misc);
2322}
2323
2324static void __init btrfs_print_mod_info(void)
2325{
2326	static const char options[] = ""
2327#ifdef CONFIG_BTRFS_DEBUG
2328			", debug=on"
2329#endif
2330#ifdef CONFIG_BTRFS_ASSERT
2331			", assert=on"
2332#endif
2333#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2334			", integrity-checker=on"
2335#endif
2336#ifdef CONFIG_BTRFS_FS_REF_VERIFY
2337			", ref-verify=on"
2338#endif
2339			;
2340	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
2341}
2342
2343static int __init init_btrfs_fs(void)
2344{
2345	int err;
2346
2347	btrfs_props_init();
2348
2349	err = btrfs_init_sysfs();
2350	if (err)
2351		return err;
2352
2353	btrfs_init_compress();
2354
2355	err = btrfs_init_cachep();
2356	if (err)
2357		goto free_compress;
2358
2359	err = extent_io_init();
2360	if (err)
2361		goto free_cachep;
2362
2363	err = extent_map_init();
2364	if (err)
2365		goto free_extent_io;
2366
2367	err = ordered_data_init();
2368	if (err)
2369		goto free_extent_map;
2370
2371	err = btrfs_delayed_inode_init();
2372	if (err)
2373		goto free_ordered_data;
2374
2375	err = btrfs_auto_defrag_init();
2376	if (err)
2377		goto free_delayed_inode;
2378
2379	err = btrfs_delayed_ref_init();
2380	if (err)
2381		goto free_auto_defrag;
2382
2383	err = btrfs_prelim_ref_init();
2384	if (err)
2385		goto free_delayed_ref;
2386
2387	err = btrfs_end_io_wq_init();
2388	if (err)
2389		goto free_prelim_ref;
2390
2391	err = btrfs_interface_init();
2392	if (err)
2393		goto free_end_io_wq;
2394
2395	btrfs_init_lockdep();
2396
2397	btrfs_print_mod_info();
2398
2399	err = btrfs_run_sanity_tests();
2400	if (err)
2401		goto unregister_ioctl;
2402
2403	err = register_filesystem(&btrfs_fs_type);
2404	if (err)
2405		goto unregister_ioctl;
2406
2407	return 0;
2408
2409unregister_ioctl:
2410	btrfs_interface_exit();
2411free_end_io_wq:
2412	btrfs_end_io_wq_exit();
2413free_prelim_ref:
2414	btrfs_prelim_ref_exit();
2415free_delayed_ref:
2416	btrfs_delayed_ref_exit();
2417free_auto_defrag:
2418	btrfs_auto_defrag_exit();
2419free_delayed_inode:
2420	btrfs_delayed_inode_exit();
2421free_ordered_data:
2422	ordered_data_exit();
2423free_extent_map:
2424	extent_map_exit();
2425free_extent_io:
2426	extent_io_exit();
2427free_cachep:
2428	btrfs_destroy_cachep();
2429free_compress:
2430	btrfs_exit_compress();
2431	btrfs_exit_sysfs();
2432
2433	return err;
2434}
2435
2436static void __exit exit_btrfs_fs(void)
2437{
2438	btrfs_destroy_cachep();
2439	btrfs_delayed_ref_exit();
2440	btrfs_auto_defrag_exit();
2441	btrfs_delayed_inode_exit();
2442	btrfs_prelim_ref_exit();
2443	ordered_data_exit();
2444	extent_map_exit();
2445	extent_io_exit();
2446	btrfs_interface_exit();
2447	btrfs_end_io_wq_exit();
2448	unregister_filesystem(&btrfs_fs_type);
2449	btrfs_exit_sysfs();
2450	btrfs_cleanup_fs_uuids();
2451	btrfs_exit_compress();
2452}
2453
2454late_initcall(init_btrfs_fs);
2455module_exit(exit_btrfs_fs)
2456
2457MODULE_LICENSE("GPL");
2458MODULE_SOFTDEP("pre: crc32c");