   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/module.h>
   8#include <linux/buffer_head.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
  11#include <linux/highmem.h>
  12#include <linux/time.h>
  13#include <linux/init.h>
  14#include <linux/seq_file.h>
  15#include <linux/string.h>
  16#include <linux/backing-dev.h>
  17#include <linux/mount.h>
  18#include <linux/mpage.h>
  19#include <linux/swap.h>
  20#include <linux/writeback.h>
  21#include <linux/statfs.h>
  22#include <linux/compat.h>
  23#include <linux/parser.h>
  24#include <linux/ctype.h>
  25#include <linux/namei.h>
  26#include <linux/miscdevice.h>
  27#include <linux/magic.h>
  28#include <linux/slab.h>
  29#include <linux/cleancache.h>
  30#include <linux/ratelimit.h>
  31#include <linux/crc32c.h>
  32#include <linux/btrfs.h>
  33#include "delayed-inode.h"
  34#include "ctree.h"
  35#include "disk-io.h"
  36#include "transaction.h"
  37#include "btrfs_inode.h"
  38#include "print-tree.h"
  39#include "props.h"
  40#include "xattr.h"
  41#include "volumes.h"
  42#include "export.h"
  43#include "compression.h"
  44#include "rcu-string.h"
  45#include "dev-replace.h"
  46#include "free-space-cache.h"
  47#include "backref.h"
  48#include "tests/btrfs-tests.h"
  49
  50#include "qgroup.h"
  51#define CREATE_TRACE_POINTS
  52#include <trace/events/btrfs.h>
  53
  54static const struct super_operations btrfs_super_ops;
  55
  56/*
  57 * Types for mounting the default subvolume and a subvolume explicitly
  58 * requested by subvol=/path. That way the callchain is straightforward and we
  59 * don't have to play tricks with the mount options and recursive calls to
  60 * btrfs_mount.
  61 *
   62 * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
  63 */
  64static struct file_system_type btrfs_fs_type;
  65static struct file_system_type btrfs_root_fs_type;
  66
  67static int btrfs_remount(struct super_block *sb, int *flags, char *data);
  68
  69const char *btrfs_decode_error(int errno)
  70{
  71	char *errstr = "unknown";
  72
  73	switch (errno) {
  74	case -EIO:
  75		errstr = "IO failure";
  76		break;
  77	case -ENOMEM:
  78		errstr = "Out of memory";
  79		break;
  80	case -EROFS:
  81		errstr = "Readonly filesystem";
  82		break;
  83	case -EEXIST:
  84		errstr = "Object already exists";
  85		break;
  86	case -ENOSPC:
  87		errstr = "No space left";
  88		break;
  89	case -ENOENT:
  90		errstr = "No such entry";
  91		break;
  92	}
  93
  94	return errstr;
  95}
  96
  97/*
  98 * __btrfs_handle_fs_error decodes expected errors from the caller and
   99 * invokes the appropriate error response.
 100 */
 101__cold
 102void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
 103		       unsigned int line, int errno, const char *fmt, ...)
 104{
 105	struct super_block *sb = fs_info->sb;
 106#ifdef CONFIG_PRINTK
 107	const char *errstr;
 108#endif
 109
 110	/*
 111	 * Special case: if the error is EROFS, and we're already
 112	 * under SB_RDONLY, then it is safe here.
 113	 */
 114	if (errno == -EROFS && sb_rdonly(sb))
 115  		return;
 116
 117#ifdef CONFIG_PRINTK
 118	errstr = btrfs_decode_error(errno);
 119	if (fmt) {
 120		struct va_format vaf;
 121		va_list args;
 122
 123		va_start(args, fmt);
 124		vaf.fmt = fmt;
 125		vaf.va = &args;
 126
 127		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 128			sb->s_id, function, line, errno, errstr, &vaf);
 129		va_end(args);
 130	} else {
 131		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 132			sb->s_id, function, line, errno, errstr);
 133	}
 134#endif
 135
 136	/*
 137	 * Today we only save the error info to memory.  Long term we'll
 138	 * also send it down to the disk
 139	 */
 140	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 141
 142	/* Don't go through full error handling during mount */
 143	if (!(sb->s_flags & SB_BORN))
 144		return;
 145
 146	if (sb_rdonly(sb))
 147		return;
 148
 149	/* btrfs handle error by forcing the filesystem readonly */
 150	sb->s_flags |= SB_RDONLY;
 151	btrfs_info(fs_info, "forced readonly");
 152	/*
 153	 * Note that a running device replace operation is not canceled here
 154	 * although there is no way to update the progress. It would add the
 155	 * risk of a deadlock, therefore the canceling is omitted. The only
 156	 * penalty is that some I/O remains active until the procedure
 157	 * completes. The next time when the filesystem is mounted writeable
 158	 * again, the device replace operation continues.
 159	 */
 160}
 161
 162#ifdef CONFIG_PRINTK
 163static const char * const logtypes[] = {
 164	"emergency",
 165	"alert",
 166	"critical",
 167	"error",
 168	"warning",
 169	"notice",
 170	"info",
 171	"debug",
 172};
 173
 174
 175/*
 176 * Use one ratelimit state per log level so that a flood of less important
 177 * messages doesn't cause more important ones to be dropped.
 178 */
 179static struct ratelimit_state printk_limits[] = {
 180	RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
 181	RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
 182	RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
 183	RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
 184	RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
 185	RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
 186	RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
 187	RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
 188};
 189
 190void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 191{
 192	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
 193	struct va_format vaf;
 194	va_list args;
 195	int kern_level;
 196	const char *type = logtypes[4];
 197	struct ratelimit_state *ratelimit = &printk_limits[4];
 198
 199	va_start(args, fmt);
 200
 201	while ((kern_level = printk_get_level(fmt)) != 0) {
 202		size_t size = printk_skip_level(fmt) - fmt;
 203
 204		if (kern_level >= '0' && kern_level <= '7') {
 205			memcpy(lvl, fmt,  size);
 206			lvl[size] = '\0';
 207			type = logtypes[kern_level - '0'];
 208			ratelimit = &printk_limits[kern_level - '0'];
 209		}
 210		fmt += size;
 211	}
 212
 213	vaf.fmt = fmt;
 214	vaf.va = &args;
 215
 216	if (__ratelimit(ratelimit))
 217		printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
 218			fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
 219
 220	va_end(args);
 221}
 222#endif
 223
 224/*
 225 * We only mark the transaction aborted and then set the file system read-only.
 226 * This will prevent new transactions from starting or trying to join this
 227 * one.
 228 *
 229 * This means that error recovery at the call site is limited to freeing
 230 * any local memory allocations and passing the error code up without
 231 * further cleanup. The transaction should complete as it normally would
 232 * in the call path but will return -EIO.
 233 *
 234 * We'll complete the cleanup in btrfs_end_transaction and
 235 * btrfs_commit_transaction.
 236 */
 237__cold
 238void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 239			       const char *function,
 240			       unsigned int line, int errno)
 241{
 242	struct btrfs_fs_info *fs_info = trans->fs_info;
 243
 244	trans->aborted = errno;
 245	/* Nothing used. The other threads that have joined this
 246	 * transaction may be able to continue. */
 247	if (!trans->dirty && list_empty(&trans->new_bgs)) {
 248		const char *errstr;
 249
 250		errstr = btrfs_decode_error(errno);
 251		btrfs_warn(fs_info,
 252		           "%s:%d: Aborting unused transaction(%s).",
 253		           function, line, errstr);
 254		return;
 255	}
 256	WRITE_ONCE(trans->transaction->aborted, errno);
 257	/* Wake up anybody who may be waiting on this transaction */
 258	wake_up(&fs_info->transaction_wait);
 259	wake_up(&fs_info->transaction_blocked_wait);
 260	__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
 261}
 262/*
 263 * __btrfs_panic decodes unexpected, fatal errors from the caller,
 264 * issues an alert, and either panics or BUGs, depending on mount options.
 265 */
 266__cold
 267void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 268		   unsigned int line, int errno, const char *fmt, ...)
 269{
 270	char *s_id = "<unknown>";
 271	const char *errstr;
 272	struct va_format vaf = { .fmt = fmt };
 273	va_list args;
 274
 275	if (fs_info)
 276		s_id = fs_info->sb->s_id;
 277
 278	va_start(args, fmt);
 279	vaf.va = &args;
 280
 281	errstr = btrfs_decode_error(errno);
 282	if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
 283		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
 284			s_id, function, line, &vaf, errno, errstr);
 285
 286	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
 287		   function, line, &vaf, errno, errstr);
 288	va_end(args);
 289	/* Caller calls BUG() */
 290}
 291
 292static void btrfs_put_super(struct super_block *sb)
 293{
 294	close_ctree(btrfs_sb(sb));
 295}
 296
 297enum {
 298	Opt_acl, Opt_noacl,
 299	Opt_clear_cache,
 300	Opt_commit_interval,
 301	Opt_compress,
 302	Opt_compress_force,
 303	Opt_compress_force_type,
 304	Opt_compress_type,
 305	Opt_degraded,
 306	Opt_device,
 307	Opt_fatal_errors,
 308	Opt_flushoncommit, Opt_noflushoncommit,
 309	Opt_inode_cache, Opt_noinode_cache,
 310	Opt_max_inline,
 311	Opt_barrier, Opt_nobarrier,
 312	Opt_datacow, Opt_nodatacow,
 313	Opt_datasum, Opt_nodatasum,
 314	Opt_defrag, Opt_nodefrag,
 315	Opt_discard, Opt_nodiscard,
 316	Opt_nologreplay,
 317	Opt_norecovery,
 318	Opt_ratio,
 319	Opt_rescan_uuid_tree,
 320	Opt_skip_balance,
 321	Opt_space_cache, Opt_no_space_cache,
 322	Opt_space_cache_version,
 323	Opt_ssd, Opt_nossd,
 324	Opt_ssd_spread, Opt_nossd_spread,
 325	Opt_subvol,
 326	Opt_subvolid,
 327	Opt_thread_pool,
 328	Opt_treelog, Opt_notreelog,
 329	Opt_usebackuproot,
 330	Opt_user_subvol_rm_allowed,
 331
 332	/* Deprecated options */
 333	Opt_alloc_start,
 334	Opt_recovery,
 335	Opt_subvolrootid,
 336
 337	/* Debugging options */
 338	Opt_check_integrity,
 339	Opt_check_integrity_including_extent_data,
 340	Opt_check_integrity_print_mask,
 341	Opt_enospc_debug, Opt_noenospc_debug,
 342#ifdef CONFIG_BTRFS_DEBUG
 343	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
 344#endif
 345#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 346	Opt_ref_verify,
 347#endif
 348	Opt_err,
 349};
 350
 351static const match_table_t tokens = {
 352	{Opt_acl, "acl"},
 353	{Opt_noacl, "noacl"},
 354	{Opt_clear_cache, "clear_cache"},
 355	{Opt_commit_interval, "commit=%u"},
 356	{Opt_compress, "compress"},
 357	{Opt_compress_type, "compress=%s"},
 358	{Opt_compress_force, "compress-force"},
 359	{Opt_compress_force_type, "compress-force=%s"},
 360	{Opt_degraded, "degraded"},
 361	{Opt_device, "device=%s"},
 362	{Opt_fatal_errors, "fatal_errors=%s"},
 363	{Opt_flushoncommit, "flushoncommit"},
 364	{Opt_noflushoncommit, "noflushoncommit"},
 365	{Opt_inode_cache, "inode_cache"},
 366	{Opt_noinode_cache, "noinode_cache"},
 367	{Opt_max_inline, "max_inline=%s"},
 368	{Opt_barrier, "barrier"},
 369	{Opt_nobarrier, "nobarrier"},
 370	{Opt_datacow, "datacow"},
 371	{Opt_nodatacow, "nodatacow"},
 372	{Opt_datasum, "datasum"},
 373	{Opt_nodatasum, "nodatasum"},
 374	{Opt_defrag, "autodefrag"},
 375	{Opt_nodefrag, "noautodefrag"},
 376	{Opt_discard, "discard"},
 377	{Opt_nodiscard, "nodiscard"},
 378	{Opt_nologreplay, "nologreplay"},
 379	{Opt_norecovery, "norecovery"},
 380	{Opt_ratio, "metadata_ratio=%u"},
 381	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 382	{Opt_skip_balance, "skip_balance"},
 383	{Opt_space_cache, "space_cache"},
 384	{Opt_no_space_cache, "nospace_cache"},
 385	{Opt_space_cache_version, "space_cache=%s"},
 386	{Opt_ssd, "ssd"},
 387	{Opt_nossd, "nossd"},
 388	{Opt_ssd_spread, "ssd_spread"},
 389	{Opt_nossd_spread, "nossd_spread"},
 390	{Opt_subvol, "subvol=%s"},
 391	{Opt_subvolid, "subvolid=%s"},
 392	{Opt_thread_pool, "thread_pool=%u"},
 393	{Opt_treelog, "treelog"},
 394	{Opt_notreelog, "notreelog"},
 395	{Opt_usebackuproot, "usebackuproot"},
 396	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 397
 398	/* Deprecated options */
 399	{Opt_alloc_start, "alloc_start=%s"},
 400	{Opt_recovery, "recovery"},
 401	{Opt_subvolrootid, "subvolrootid=%d"},
 402
 403	/* Debugging options */
 404	{Opt_check_integrity, "check_int"},
 405	{Opt_check_integrity_including_extent_data, "check_int_data"},
 406	{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
 407	{Opt_enospc_debug, "enospc_debug"},
 408	{Opt_noenospc_debug, "noenospc_debug"},
 409#ifdef CONFIG_BTRFS_DEBUG
 410	{Opt_fragment_data, "fragment=data"},
 411	{Opt_fragment_metadata, "fragment=metadata"},
 412	{Opt_fragment_all, "fragment=all"},
 413#endif
 414#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 415	{Opt_ref_verify, "ref_verify"},
 416#endif
 417	{Opt_err, NULL},
 418};
 419
 420/*
 421 * Regular mount options parser.  Everything that is needed only when
 422 * reading in a new superblock is parsed here.
 423 * XXX JDM: This needs to be cleaned up for remount.
 424 */
 425int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 426			unsigned long new_flags)
 427{
 428	substring_t args[MAX_OPT_ARGS];
 429	char *p, *num;
 430	u64 cache_gen;
 431	int intarg;
 432	int ret = 0;
 433	char *compress_type;
 434	bool compress_force = false;
 435	enum btrfs_compression_type saved_compress_type;
 436	bool saved_compress_force;
 437	int no_compress = 0;
 438
 439	cache_gen = btrfs_super_cache_generation(info->super_copy);
 440	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
 441		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
 442	else if (cache_gen)
 443		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 444
 445	/*
  446	 * Even if the options are empty, we still need to do the extra check
  447	 * against the new flags
 448	 */
 449	if (!options)
 450		goto check;
 451
 452	while ((p = strsep(&options, ",")) != NULL) {
 453		int token;
 454		if (!*p)
 455			continue;
 456
 457		token = match_token(p, tokens, args);
 458		switch (token) {
 459		case Opt_degraded:
 460			btrfs_info(info, "allowing degraded mounts");
 461			btrfs_set_opt(info->mount_opt, DEGRADED);
 462			break;
 463		case Opt_subvol:
 464		case Opt_subvolid:
 465		case Opt_subvolrootid:
 466		case Opt_device:
 467			/*
 468			 * These are parsed by btrfs_parse_subvol_options
 469			 * and btrfs_parse_early_options
 470			 * and can be happily ignored here.
 471			 */
 472			break;
 473		case Opt_nodatasum:
 474			btrfs_set_and_info(info, NODATASUM,
 475					   "setting nodatasum");
 476			break;
 477		case Opt_datasum:
 478			if (btrfs_test_opt(info, NODATASUM)) {
 479				if (btrfs_test_opt(info, NODATACOW))
 480					btrfs_info(info,
 481						   "setting datasum, datacow enabled");
 482				else
 483					btrfs_info(info, "setting datasum");
 484			}
 485			btrfs_clear_opt(info->mount_opt, NODATACOW);
 486			btrfs_clear_opt(info->mount_opt, NODATASUM);
 487			break;
 488		case Opt_nodatacow:
 489			if (!btrfs_test_opt(info, NODATACOW)) {
 490				if (!btrfs_test_opt(info, COMPRESS) ||
 491				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
 492					btrfs_info(info,
 493						   "setting nodatacow, compression disabled");
 494				} else {
 495					btrfs_info(info, "setting nodatacow");
 496				}
 497			}
 498			btrfs_clear_opt(info->mount_opt, COMPRESS);
 499			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 500			btrfs_set_opt(info->mount_opt, NODATACOW);
 501			btrfs_set_opt(info->mount_opt, NODATASUM);
 502			break;
 503		case Opt_datacow:
 504			btrfs_clear_and_info(info, NODATACOW,
 505					     "setting datacow");
 506			break;
 507		case Opt_compress_force:
 508		case Opt_compress_force_type:
 509			compress_force = true;
 510			/* Fallthrough */
 511		case Opt_compress:
 512		case Opt_compress_type:
 513			saved_compress_type = btrfs_test_opt(info,
 514							     COMPRESS) ?
 515				info->compress_type : BTRFS_COMPRESS_NONE;
 516			saved_compress_force =
 517				btrfs_test_opt(info, FORCE_COMPRESS);
 518			if (token == Opt_compress ||
 519			    token == Opt_compress_force ||
 520			    strncmp(args[0].from, "zlib", 4) == 0) {
 521				compress_type = "zlib";
 522
 523				info->compress_type = BTRFS_COMPRESS_ZLIB;
 524				info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
 525				/*
 526				 * args[0] contains uninitialized data since
 527				 * for these tokens we don't expect any
 528				 * parameter.
 529				 */
 530				if (token != Opt_compress &&
 531				    token != Opt_compress_force)
 532					info->compress_level =
 533					  btrfs_compress_str2level(args[0].from);
 534				btrfs_set_opt(info->mount_opt, COMPRESS);
 535				btrfs_clear_opt(info->mount_opt, NODATACOW);
 536				btrfs_clear_opt(info->mount_opt, NODATASUM);
 537				no_compress = 0;
 538			} else if (strncmp(args[0].from, "lzo", 3) == 0) {
 539				compress_type = "lzo";
 540				info->compress_type = BTRFS_COMPRESS_LZO;
 541				btrfs_set_opt(info->mount_opt, COMPRESS);
 542				btrfs_clear_opt(info->mount_opt, NODATACOW);
 543				btrfs_clear_opt(info->mount_opt, NODATASUM);
 544				btrfs_set_fs_incompat(info, COMPRESS_LZO);
 545				no_compress = 0;
 546			} else if (strcmp(args[0].from, "zstd") == 0) {
 547				compress_type = "zstd";
 548				info->compress_type = BTRFS_COMPRESS_ZSTD;
 549				btrfs_set_opt(info->mount_opt, COMPRESS);
 550				btrfs_clear_opt(info->mount_opt, NODATACOW);
 551				btrfs_clear_opt(info->mount_opt, NODATASUM);
 552				btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
 553				no_compress = 0;
 554			} else if (strncmp(args[0].from, "no", 2) == 0) {
 555				compress_type = "no";
 556				btrfs_clear_opt(info->mount_opt, COMPRESS);
 557				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 558				compress_force = false;
 559				no_compress++;
 560			} else {
 561				ret = -EINVAL;
 562				goto out;
 563			}
 564
 565			if (compress_force) {
 566				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
 567			} else {
 568				/*
 569				 * If we remount from compress-force=xxx to
 570				 * compress=xxx, we need clear FORCE_COMPRESS
 571				 * flag, otherwise, there is no way for users
 572				 * to disable forcible compression separately.
 573				 */
 574				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 575			}
 576			if ((btrfs_test_opt(info, COMPRESS) &&
 577			     (info->compress_type != saved_compress_type ||
 578			      compress_force != saved_compress_force)) ||
 579			    (!btrfs_test_opt(info, COMPRESS) &&
 580			     no_compress == 1)) {
 581				btrfs_info(info, "%s %s compression, level %d",
 582					   (compress_force) ? "force" : "use",
 583					   compress_type, info->compress_level);
 584			}
 585			compress_force = false;
 586			break;
 587		case Opt_ssd:
 588			btrfs_set_and_info(info, SSD,
 589					   "enabling ssd optimizations");
 590			btrfs_clear_opt(info->mount_opt, NOSSD);
 591			break;
 592		case Opt_ssd_spread:
 593			btrfs_set_and_info(info, SSD,
 594					   "enabling ssd optimizations");
 595			btrfs_set_and_info(info, SSD_SPREAD,
 596					   "using spread ssd allocation scheme");
 597			btrfs_clear_opt(info->mount_opt, NOSSD);
 598			break;
 599		case Opt_nossd:
 600			btrfs_set_opt(info->mount_opt, NOSSD);
 601			btrfs_clear_and_info(info, SSD,
 602					     "not using ssd optimizations");
 603			/* Fallthrough */
 604		case Opt_nossd_spread:
 605			btrfs_clear_and_info(info, SSD_SPREAD,
 606					     "not using spread ssd allocation scheme");
 607			break;
 608		case Opt_barrier:
 609			btrfs_clear_and_info(info, NOBARRIER,
 610					     "turning on barriers");
 611			break;
 612		case Opt_nobarrier:
 613			btrfs_set_and_info(info, NOBARRIER,
 614					   "turning off barriers");
 615			break;
 616		case Opt_thread_pool:
 617			ret = match_int(&args[0], &intarg);
 618			if (ret) {
 619				goto out;
 620			} else if (intarg == 0) {
 621				ret = -EINVAL;
 622				goto out;
 623			}
 624			info->thread_pool_size = intarg;
 625			break;
 626		case Opt_max_inline:
 627			num = match_strdup(&args[0]);
 628			if (num) {
 629				info->max_inline = memparse(num, NULL);
 630				kfree(num);
 631
 632				if (info->max_inline) {
 633					info->max_inline = min_t(u64,
 634						info->max_inline,
 635						info->sectorsize);
 636				}
 637				btrfs_info(info, "max_inline at %llu",
 638					   info->max_inline);
 639			} else {
 640				ret = -ENOMEM;
 641				goto out;
 642			}
 643			break;
 644		case Opt_alloc_start:
 645			btrfs_info(info,
 646				"option alloc_start is obsolete, ignored");
 647			break;
 648		case Opt_acl:
 649#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 650			info->sb->s_flags |= SB_POSIXACL;
 651			break;
 652#else
 653			btrfs_err(info, "support for ACL not compiled in!");
 654			ret = -EINVAL;
 655			goto out;
 656#endif
 657		case Opt_noacl:
 658			info->sb->s_flags &= ~SB_POSIXACL;
 659			break;
 660		case Opt_notreelog:
 661			btrfs_set_and_info(info, NOTREELOG,
 662					   "disabling tree log");
 663			break;
 664		case Opt_treelog:
 665			btrfs_clear_and_info(info, NOTREELOG,
 666					     "enabling tree log");
 667			break;
 668		case Opt_norecovery:
 669		case Opt_nologreplay:
 670			btrfs_set_and_info(info, NOLOGREPLAY,
 671					   "disabling log replay at mount time");
 672			break;
 673		case Opt_flushoncommit:
 674			btrfs_set_and_info(info, FLUSHONCOMMIT,
 675					   "turning on flush-on-commit");
 676			break;
 677		case Opt_noflushoncommit:
 678			btrfs_clear_and_info(info, FLUSHONCOMMIT,
 679					     "turning off flush-on-commit");
 680			break;
 681		case Opt_ratio:
 682			ret = match_int(&args[0], &intarg);
 683			if (ret)
 684				goto out;
 685			info->metadata_ratio = intarg;
 686			btrfs_info(info, "metadata ratio %u",
 687				   info->metadata_ratio);
 688			break;
 689		case Opt_discard:
 690			btrfs_set_and_info(info, DISCARD,
 691					   "turning on discard");
 692			break;
 693		case Opt_nodiscard:
 694			btrfs_clear_and_info(info, DISCARD,
 695					     "turning off discard");
 696			break;
 697		case Opt_space_cache:
 698		case Opt_space_cache_version:
 699			if (token == Opt_space_cache ||
 700			    strcmp(args[0].from, "v1") == 0) {
 701				btrfs_clear_opt(info->mount_opt,
 702						FREE_SPACE_TREE);
 703				btrfs_set_and_info(info, SPACE_CACHE,
 704					   "enabling disk space caching");
 705			} else if (strcmp(args[0].from, "v2") == 0) {
 706				btrfs_clear_opt(info->mount_opt,
 707						SPACE_CACHE);
 708				btrfs_set_and_info(info, FREE_SPACE_TREE,
 709						   "enabling free space tree");
 710			} else {
 711				ret = -EINVAL;
 712				goto out;
 713			}
 714			break;
 715		case Opt_rescan_uuid_tree:
 716			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 717			break;
 718		case Opt_no_space_cache:
 719			if (btrfs_test_opt(info, SPACE_CACHE)) {
 720				btrfs_clear_and_info(info, SPACE_CACHE,
 721					     "disabling disk space caching");
 722			}
 723			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
 724				btrfs_clear_and_info(info, FREE_SPACE_TREE,
 725					     "disabling free space tree");
 726			}
 727			break;
 728		case Opt_inode_cache:
 729			btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
 730					   "enabling inode map caching");
 731			break;
 732		case Opt_noinode_cache:
 733			btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
 734					     "disabling inode map caching");
 735			break;
 736		case Opt_clear_cache:
 737			btrfs_set_and_info(info, CLEAR_CACHE,
 738					   "force clearing of disk cache");
 739			break;
 740		case Opt_user_subvol_rm_allowed:
 741			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 742			break;
 743		case Opt_enospc_debug:
 744			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
 745			break;
 746		case Opt_noenospc_debug:
 747			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
 748			break;
 749		case Opt_defrag:
 750			btrfs_set_and_info(info, AUTO_DEFRAG,
 751					   "enabling auto defrag");
 752			break;
 753		case Opt_nodefrag:
 754			btrfs_clear_and_info(info, AUTO_DEFRAG,
 755					     "disabling auto defrag");
 756			break;
 757		case Opt_recovery:
 758			btrfs_warn(info,
 759				   "'recovery' is deprecated, use 'usebackuproot' instead");
 760		case Opt_usebackuproot:
 761			btrfs_info(info,
 762				   "trying to use backup root at mount time");
 763			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 764			break;
 765		case Opt_skip_balance:
 766			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
 767			break;
 768#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 769		case Opt_check_integrity_including_extent_data:
 770			btrfs_info(info,
 771				   "enabling check integrity including extent data");
 772			btrfs_set_opt(info->mount_opt,
 773				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
 774			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 775			break;
 776		case Opt_check_integrity:
 777			btrfs_info(info, "enabling check integrity");
 778			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 779			break;
 780		case Opt_check_integrity_print_mask:
 781			ret = match_int(&args[0], &intarg);
 782			if (ret)
 783				goto out;
 784			info->check_integrity_print_mask = intarg;
 785			btrfs_info(info, "check_integrity_print_mask 0x%x",
 786				   info->check_integrity_print_mask);
 787			break;
 788#else
 789		case Opt_check_integrity_including_extent_data:
 790		case Opt_check_integrity:
 791		case Opt_check_integrity_print_mask:
 792			btrfs_err(info,
 793				  "support for check_integrity* not compiled in!");
 794			ret = -EINVAL;
 795			goto out;
 796#endif
 797		case Opt_fatal_errors:
 798			if (strcmp(args[0].from, "panic") == 0)
 799				btrfs_set_opt(info->mount_opt,
 800					      PANIC_ON_FATAL_ERROR);
 801			else if (strcmp(args[0].from, "bug") == 0)
 802				btrfs_clear_opt(info->mount_opt,
 803					      PANIC_ON_FATAL_ERROR);
 804			else {
 805				ret = -EINVAL;
 806				goto out;
 807			}
 808			break;
 809		case Opt_commit_interval:
 810			intarg = 0;
 811			ret = match_int(&args[0], &intarg);
 812			if (ret)
 813				goto out;
 814			if (intarg == 0) {
 815				btrfs_info(info,
 816					   "using default commit interval %us",
 817					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 818				intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
 819			} else if (intarg > 300) {
 820				btrfs_warn(info, "excessive commit interval %d",
 821					   intarg);
 822			}
 823			info->commit_interval = intarg;
 824			break;
 825#ifdef CONFIG_BTRFS_DEBUG
 826		case Opt_fragment_all:
 827			btrfs_info(info, "fragmenting all space");
 828			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 829			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
 830			break;
 831		case Opt_fragment_metadata:
 832			btrfs_info(info, "fragmenting metadata");
 833			btrfs_set_opt(info->mount_opt,
 834				      FRAGMENT_METADATA);
 835			break;
 836		case Opt_fragment_data:
 837			btrfs_info(info, "fragmenting data");
 838			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 839			break;
 840#endif
 841#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 842		case Opt_ref_verify:
 843			btrfs_info(info, "doing ref verification");
 844			btrfs_set_opt(info->mount_opt, REF_VERIFY);
 845			break;
 846#endif
 847		case Opt_err:
 848			btrfs_info(info, "unrecognized mount option '%s'", p);
 849			ret = -EINVAL;
 850			goto out;
 851		default:
 852			break;
 853		}
 854	}
 855check:
 856	/*
 857	 * Extra check for current option against current flag
 858	 */
 859	if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
 860		btrfs_err(info,
 861			  "nologreplay must be used with ro mount option");
 862		ret = -EINVAL;
 863	}
 864out:
 865	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
 866	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
 867	    !btrfs_test_opt(info, CLEAR_CACHE)) {
 868		btrfs_err(info, "cannot disable free space tree");
 869		ret = -EINVAL;
 870
 871	}
 872	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
 873		btrfs_info(info, "disk space caching is enabled");
 874	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
 875		btrfs_info(info, "using free space tree");
 876	return ret;
 877}
 878
 879/*
 880 * Parse mount options that are required early in the mount process.
 881 *
  882 * All other options will be parsed much later in the mount process and
 883 * only when we need to allocate a new super block.
 884 */
 885static int btrfs_parse_early_options(const char *options, fmode_t flags,
 886		void *holder, struct btrfs_fs_devices **fs_devices)
 887{
 888	substring_t args[MAX_OPT_ARGS];
 889	char *device_name, *opts, *orig, *p;
 890	int error = 0;
 891
 892	if (!options)
 893		return 0;
 894
 895	/*
 896	 * strsep changes the string, duplicate it because btrfs_parse_options
 897	 * gets called later
 898	 */
 899	opts = kstrdup(options, GFP_KERNEL);
 900	if (!opts)
 901		return -ENOMEM;
 902	orig = opts;
 903
 904	while ((p = strsep(&opts, ",")) != NULL) {
 905		int token;
 906
 907		if (!*p)
 908			continue;
 909
 910		token = match_token(p, tokens, args);
 911		if (token == Opt_device) {
 912			device_name = match_strdup(&args[0]);
 913			if (!device_name) {
 914				error = -ENOMEM;
 915				goto out;
 916			}
 917			error = btrfs_scan_one_device(device_name,
 918					flags, holder, fs_devices);
 919			kfree(device_name);
 920			if (error)
 921				goto out;
 922		}
 923	}
 924
 925out:
 926	kfree(orig);
 927	return error;
 928}
 929
 930/*
 931 * Parse mount options that are related to subvolume id
 932 *
 933 * The value is later passed to mount_subvol()
 934 */
 935static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
 936		char **subvol_name, u64 *subvol_objectid)
 937{
 938	substring_t args[MAX_OPT_ARGS];
 939	char *opts, *orig, *p;
 940	int error = 0;
 941	u64 subvolid;
 942
 943	if (!options)
 944		return 0;
 945
 946	/*
 947	 * strsep changes the string, duplicate it because
 948	 * btrfs_parse_early_options gets called later
 949	 */
 950	opts = kstrdup(options, GFP_KERNEL);
 951	if (!opts)
 952		return -ENOMEM;
 953	orig = opts;
 954
 955	while ((p = strsep(&opts, ",")) != NULL) {
 956		int token;
 957		if (!*p)
 958			continue;
 959
 960		token = match_token(p, tokens, args);
 961		switch (token) {
 962		case Opt_subvol:
 963			kfree(*subvol_name);
 964			*subvol_name = match_strdup(&args[0]);
 965			if (!*subvol_name) {
 966				error = -ENOMEM;
 967				goto out;
 968			}
 969			break;
 970		case Opt_subvolid:
 971			error = match_u64(&args[0], &subvolid);
 972			if (error)
 973				goto out;
 974
 975			/* we want the original fs_tree */
 976			if (subvolid == 0)
 977				subvolid = BTRFS_FS_TREE_OBJECTID;
 978
 979			*subvol_objectid = subvolid;
 980			break;
 981		case Opt_subvolrootid:
 982			pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
 983			break;
 984		default:
 985			break;
 986		}
 987	}
 988
 989out:
 990	kfree(orig);
 991	return error;
 992}
 993
 994static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
 995					   u64 subvol_objectid)
 996{
 997	struct btrfs_root *root = fs_info->tree_root;
 998	struct btrfs_root *fs_root;
 999	struct btrfs_root_ref *root_ref;
1000	struct btrfs_inode_ref *inode_ref;
1001	struct btrfs_key key;
1002	struct btrfs_path *path = NULL;
1003	char *name = NULL, *ptr;
1004	u64 dirid;
1005	int len;
1006	int ret;
1007
1008	path = btrfs_alloc_path();
1009	if (!path) {
1010		ret = -ENOMEM;
1011		goto err;
1012	}
1013	path->leave_spinning = 1;
1014
1015	name = kmalloc(PATH_MAX, GFP_KERNEL);
1016	if (!name) {
1017		ret = -ENOMEM;
1018		goto err;
1019	}
1020	ptr = name + PATH_MAX - 1;
1021	ptr[0] = '\0';
1022
1023	/*
1024	 * Walk up the subvolume trees in the tree of tree roots by root
1025	 * backrefs until we hit the top-level subvolume.
1026	 */
1027	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1028		key.objectid = subvol_objectid;
1029		key.type = BTRFS_ROOT_BACKREF_KEY;
1030		key.offset = (u64)-1;
1031
1032		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1033		if (ret < 0) {
1034			goto err;
1035		} else if (ret > 0) {
1036			ret = btrfs_previous_item(root, path, subvol_objectid,
1037						  BTRFS_ROOT_BACKREF_KEY);
1038			if (ret < 0) {
1039				goto err;
1040			} else if (ret > 0) {
1041				ret = -ENOENT;
1042				goto err;
1043			}
1044		}
1045
1046		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1047		subvol_objectid = key.offset;
1048
1049		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1050					  struct btrfs_root_ref);
1051		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1052		ptr -= len + 1;
1053		if (ptr < name) {
1054			ret = -ENAMETOOLONG;
1055			goto err;
1056		}
1057		read_extent_buffer(path->nodes[0], ptr + 1,
1058				   (unsigned long)(root_ref + 1), len);
1059		ptr[0] = '/';
1060		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1061		btrfs_release_path(path);
1062
1063		key.objectid = subvol_objectid;
1064		key.type = BTRFS_ROOT_ITEM_KEY;
1065		key.offset = (u64)-1;
1066		fs_root = btrfs_read_fs_root_no_name(fs_info, &key);
1067		if (IS_ERR(fs_root)) {
1068			ret = PTR_ERR(fs_root);
1069			goto err;
1070		}
1071
1072		/*
1073		 * Walk up the filesystem tree by inode refs until we hit the
1074		 * root directory.
1075		 */
1076		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1077			key.objectid = dirid;
1078			key.type = BTRFS_INODE_REF_KEY;
1079			key.offset = (u64)-1;
1080
1081			ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1082			if (ret < 0) {
1083				goto err;
1084			} else if (ret > 0) {
1085				ret = btrfs_previous_item(fs_root, path, dirid,
1086							  BTRFS_INODE_REF_KEY);
1087				if (ret < 0) {
1088					goto err;
1089				} else if (ret > 0) {
1090					ret = -ENOENT;
1091					goto err;
1092				}
1093			}
1094
1095			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1096			dirid = key.offset;
1097
1098			inode_ref = btrfs_item_ptr(path->nodes[0],
1099						   path->slots[0],
1100						   struct btrfs_inode_ref);
1101			len = btrfs_inode_ref_name_len(path->nodes[0],
1102						       inode_ref);
1103			ptr -= len + 1;
1104			if (ptr < name) {
1105				ret = -ENAMETOOLONG;
1106				goto err;
1107			}
1108			read_extent_buffer(path->nodes[0], ptr + 1,
1109					   (unsigned long)(inode_ref + 1), len);
1110			ptr[0] = '/';
1111			btrfs_release_path(path);
1112		}
1113	}
1114
1115	btrfs_free_path(path);
1116	if (ptr == name + PATH_MAX - 1) {
1117		name[0] = '/';
1118		name[1] = '\0';
1119	} else {
1120		memmove(name, ptr, name + PATH_MAX - ptr);
1121	}
1122	return name;
1123
1124err:
1125	btrfs_free_path(path);
1126	kfree(name);
1127	return ERR_PTR(ret);
1128}
1129
1130static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1131{
1132	struct btrfs_root *root = fs_info->tree_root;
1133	struct btrfs_dir_item *di;
1134	struct btrfs_path *path;
1135	struct btrfs_key location;
1136	u64 dir_id;
1137
1138	path = btrfs_alloc_path();
1139	if (!path)
1140		return -ENOMEM;
1141	path->leave_spinning = 1;
1142
1143	/*
1144	 * Find the "default" dir item which points to the root item that we
1145	 * will mount by default if we haven't been given a specific subvolume
1146	 * to mount.
1147	 */
1148	dir_id = btrfs_super_root_dir(fs_info->super_copy);
1149	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1150	if (IS_ERR(di)) {
1151		btrfs_free_path(path);
1152		return PTR_ERR(di);
1153	}
1154	if (!di) {
1155		/*
1156		 * Ok the default dir item isn't there.  This is weird since
1157		 * it's always been there, but don't freak out, just try and
1158		 * mount the top-level subvolume.
1159		 */
1160		btrfs_free_path(path);
1161		*objectid = BTRFS_FS_TREE_OBJECTID;
1162		return 0;
1163	}
1164
1165	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1166	btrfs_free_path(path);
1167	*objectid = location.objectid;
1168	return 0;
1169}
1170
1171static int btrfs_fill_super(struct super_block *sb,
1172			    struct btrfs_fs_devices *fs_devices,
1173			    void *data)
1174{
1175	struct inode *inode;
1176	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1177	struct btrfs_key key;
1178	int err;
1179
1180	sb->s_maxbytes = MAX_LFS_FILESIZE;
1181	sb->s_magic = BTRFS_SUPER_MAGIC;
1182	sb->s_op = &btrfs_super_ops;
1183	sb->s_d_op = &btrfs_dentry_operations;
1184	sb->s_export_op = &btrfs_export_ops;
1185	sb->s_xattr = btrfs_xattr_handlers;
1186	sb->s_time_gran = 1;
1187#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1188	sb->s_flags |= SB_POSIXACL;
1189#endif
1190	sb->s_flags |= SB_I_VERSION;
1191	sb->s_iflags |= SB_I_CGROUPWB;
1192
1193	err = super_setup_bdi(sb);
1194	if (err) {
1195		btrfs_err(fs_info, "super_setup_bdi failed");
1196		return err;
1197	}
1198
1199	err = open_ctree(sb, fs_devices, (char *)data);
1200	if (err) {
1201		btrfs_err(fs_info, "open_ctree failed");
1202		return err;
1203	}
1204
1205	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
1206	key.type = BTRFS_INODE_ITEM_KEY;
1207	key.offset = 0;
1208	inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
1209	if (IS_ERR(inode)) {
1210		err = PTR_ERR(inode);
1211		goto fail_close;
1212	}
1213
1214	sb->s_root = d_make_root(inode);
1215	if (!sb->s_root) {
1216		err = -ENOMEM;
1217		goto fail_close;
1218	}
1219
1220	cleancache_init_fs(sb);
1221	sb->s_flags |= SB_ACTIVE;
1222	return 0;
1223
1224fail_close:
1225	close_ctree(fs_info);
1226	return err;
1227}
1228
1229int btrfs_sync_fs(struct super_block *sb, int wait)
1230{
1231	struct btrfs_trans_handle *trans;
1232	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1233	struct btrfs_root *root = fs_info->tree_root;
1234
1235	trace_btrfs_sync_fs(fs_info, wait);
1236
1237	if (!wait) {
1238		filemap_flush(fs_info->btree_inode->i_mapping);
1239		return 0;
1240	}
1241
1242	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1243
1244	trans = btrfs_attach_transaction_barrier(root);
1245	if (IS_ERR(trans)) {
1246		/* no transaction, don't bother */
1247		if (PTR_ERR(trans) == -ENOENT) {
1248			/*
1249			 * Exit unless we have some pending changes
1250			 * that need to go through commit
1251			 */
1252			if (fs_info->pending_changes == 0)
1253				return 0;
1254			/*
1255			 * A non-blocking test if the fs is frozen. We must not
1256			 * start a new transaction here otherwise a deadlock
1257			 * happens. The pending operations are delayed to the
1258			 * next commit after thawing.
1259			 */
1260			if (sb_start_write_trylock(sb))
1261				sb_end_write(sb);
1262			else
1263				return 0;
1264			trans = btrfs_start_transaction(root, 0);
1265		}
1266		if (IS_ERR(trans))
1267			return PTR_ERR(trans);
1268	}
1269	return btrfs_commit_transaction(trans);
1270}
1271
1272static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1273{
1274	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1275	const char *compress_type;
1276
1277	if (btrfs_test_opt(info, DEGRADED))
1278		seq_puts(seq, ",degraded");
1279	if (btrfs_test_opt(info, NODATASUM))
1280		seq_puts(seq, ",nodatasum");
1281	if (btrfs_test_opt(info, NODATACOW))
1282		seq_puts(seq, ",nodatacow");
1283	if (btrfs_test_opt(info, NOBARRIER))
1284		seq_puts(seq, ",nobarrier");
1285	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1286		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1287	if (info->thread_pool_size !=  min_t(unsigned long,
1288					     num_online_cpus() + 2, 8))
1289		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1290	if (btrfs_test_opt(info, COMPRESS)) {
1291		compress_type = btrfs_compress_type2str(info->compress_type);
1292		if (btrfs_test_opt(info, FORCE_COMPRESS))
1293			seq_printf(seq, ",compress-force=%s", compress_type);
1294		else
1295			seq_printf(seq, ",compress=%s", compress_type);
1296		if (info->compress_level)
1297			seq_printf(seq, ":%d", info->compress_level);
1298	}
1299	if (btrfs_test_opt(info, NOSSD))
1300		seq_puts(seq, ",nossd");
1301	if (btrfs_test_opt(info, SSD_SPREAD))
1302		seq_puts(seq, ",ssd_spread");
1303	else if (btrfs_test_opt(info, SSD))
1304		seq_puts(seq, ",ssd");
1305	if (btrfs_test_opt(info, NOTREELOG))
1306		seq_puts(seq, ",notreelog");
1307	if (btrfs_test_opt(info, NOLOGREPLAY))
1308		seq_puts(seq, ",nologreplay");
1309	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1310		seq_puts(seq, ",flushoncommit");
1311	if (btrfs_test_opt(info, DISCARD))
1312		seq_puts(seq, ",discard");
1313	if (!(info->sb->s_flags & SB_POSIXACL))
1314		seq_puts(seq, ",noacl");
1315	if (btrfs_test_opt(info, SPACE_CACHE))
1316		seq_puts(seq, ",space_cache");
1317	else if (btrfs_test_opt(info, FREE_SPACE_TREE))
1318		seq_puts(seq, ",space_cache=v2");
1319	else
1320		seq_puts(seq, ",nospace_cache");
1321	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1322		seq_puts(seq, ",rescan_uuid_tree");
1323	if (btrfs_test_opt(info, CLEAR_CACHE))
1324		seq_puts(seq, ",clear_cache");
1325	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1326		seq_puts(seq, ",user_subvol_rm_allowed");
1327	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1328		seq_puts(seq, ",enospc_debug");
1329	if (btrfs_test_opt(info, AUTO_DEFRAG))
1330		seq_puts(seq, ",autodefrag");
1331	if (btrfs_test_opt(info, INODE_MAP_CACHE))
1332		seq_puts(seq, ",inode_cache");
1333	if (btrfs_test_opt(info, SKIP_BALANCE))
1334		seq_puts(seq, ",skip_balance");
1335#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1336	if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
1337		seq_puts(seq, ",check_int_data");
1338	else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1339		seq_puts(seq, ",check_int");
1340	if (info->check_integrity_print_mask)
1341		seq_printf(seq, ",check_int_print_mask=%d",
1342				info->check_integrity_print_mask);
1343#endif
1344	if (info->metadata_ratio)
1345		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1346	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1347		seq_puts(seq, ",fatal_errors=panic");
1348	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1349		seq_printf(seq, ",commit=%u", info->commit_interval);
1350#ifdef CONFIG_BTRFS_DEBUG
1351	if (btrfs_test_opt(info, FRAGMENT_DATA))
1352		seq_puts(seq, ",fragment=data");
1353	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1354		seq_puts(seq, ",fragment=metadata");
1355#endif
1356	if (btrfs_test_opt(info, REF_VERIFY))
1357		seq_puts(seq, ",ref_verify");
1358	seq_printf(seq, ",subvolid=%llu",
1359		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1360	seq_puts(seq, ",subvol=");
1361	seq_dentry(seq, dentry, " \t\n\\");
1362	return 0;
1363}
1364
1365static int btrfs_test_super(struct super_block *s, void *data)
1366{
1367	struct btrfs_fs_info *p = data;
1368	struct btrfs_fs_info *fs_info = btrfs_sb(s);
1369
1370	return fs_info->fs_devices == p->fs_devices;
1371}
1372
1373static int btrfs_set_super(struct super_block *s, void *data)
1374{
1375	int err = set_anon_super(s, data);
1376	if (!err)
1377		s->s_fs_info = data;
1378	return err;
1379}
1380
1381/*
1382 * subvolumes are identified by ino 256
1383 */
1384static inline int is_subvolume_inode(struct inode *inode)
1385{
1386	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1387		return 1;
1388	return 0;
1389}
1390
1391static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1392				   const char *device_name, struct vfsmount *mnt)
1393{
1394	struct dentry *root;
1395	int ret;
1396
1397	if (!subvol_name) {
1398		if (!subvol_objectid) {
1399			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1400							  &subvol_objectid);
1401			if (ret) {
1402				root = ERR_PTR(ret);
1403				goto out;
1404			}
1405		}
1406		subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
1407							    subvol_objectid);
1408		if (IS_ERR(subvol_name)) {
1409			root = ERR_CAST(subvol_name);
1410			subvol_name = NULL;
1411			goto out;
1412		}
1413
1414	}
1415
1416	root = mount_subtree(mnt, subvol_name);
1417	/* mount_subtree() drops our reference on the vfsmount. */
1418	mnt = NULL;
1419
1420	if (!IS_ERR(root)) {
1421		struct super_block *s = root->d_sb;
1422		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1423		struct inode *root_inode = d_inode(root);
1424		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1425
1426		ret = 0;
1427		if (!is_subvolume_inode(root_inode)) {
1428			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1429			       subvol_name);
1430			ret = -EINVAL;
1431		}
1432		if (subvol_objectid && root_objectid != subvol_objectid) {
1433			/*
1434			 * This will also catch a race condition where a
1435			 * subvolume which was passed by ID is renamed and
1436			 * another subvolume is renamed over the old location.
1437			 */
1438			btrfs_err(fs_info,
1439				  "subvol '%s' does not match subvolid %llu",
1440				  subvol_name, subvol_objectid);
1441			ret = -EINVAL;
1442		}
1443		if (ret) {
1444			dput(root);
1445			root = ERR_PTR(ret);
1446			deactivate_locked_super(s);
1447		}
1448	}
1449
1450out:
1451	mntput(mnt);
1452	kfree(subvol_name);
1453	return root;
1454}
1455
1456static int parse_security_options(char *orig_opts,
1457				  struct security_mnt_opts *sec_opts)
1458{
1459	char *secdata = NULL;
1460	int ret = 0;
1461
1462	secdata = alloc_secdata();
1463	if (!secdata)
1464		return -ENOMEM;
1465	ret = security_sb_copy_data(orig_opts, secdata);
1466	if (ret) {
1467		free_secdata(secdata);
1468		return ret;
1469	}
1470	ret = security_sb_parse_opts_str(secdata, sec_opts);
1471	free_secdata(secdata);
1472	return ret;
1473}
1474
1475static int setup_security_options(struct btrfs_fs_info *fs_info,
1476				  struct super_block *sb,
1477				  struct security_mnt_opts *sec_opts)
1478{
1479	int ret = 0;
1480
1481	/*
1482	 * Call security_sb_set_mnt_opts() to check whether new sec_opts
1483	 * is valid.
1484	 */
1485	ret = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL);
1486	if (ret)
1487		return ret;
1488
1489#ifdef CONFIG_SECURITY
1490	if (!fs_info->security_opts.num_mnt_opts) {
1491		/* first time security setup, copy sec_opts to fs_info */
1492		memcpy(&fs_info->security_opts, sec_opts, sizeof(*sec_opts));
1493	} else {
1494		/*
1495		 * Since SELinux (the only one supporting security_mnt_opts)
1496		 * does NOT support changing context during remount/mount of
1497		 * the same sb, this must be the same or part of the same
1498		 * security options, just free it.
1499		 */
1500		security_free_mnt_opts(sec_opts);
1501	}
1502#endif
1503	return ret;
1504}
1505
1506/*
1507 * Find a superblock for the given device / mount point.
1508 *
1509 * Note: This is based on mount_bdev from fs/super.c with a few additions
1510 *       for multiple device setup.  Make sure to keep it in sync.
1511 */
1512static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1513		int flags, const char *device_name, void *data)
1514{
1515	struct block_device *bdev = NULL;
1516	struct super_block *s;
1517	struct btrfs_fs_devices *fs_devices = NULL;
1518	struct btrfs_fs_info *fs_info = NULL;
1519	struct security_mnt_opts new_sec_opts;
1520	fmode_t mode = FMODE_READ;
1521	int error = 0;
1522
1523	if (!(flags & SB_RDONLY))
1524		mode |= FMODE_WRITE;
1525
1526	error = btrfs_parse_early_options(data, mode, fs_type,
1527					  &fs_devices);
1528	if (error) {
1529		return ERR_PTR(error);
1530	}
1531
1532	security_init_mnt_opts(&new_sec_opts);
1533	if (data) {
1534		error = parse_security_options(data, &new_sec_opts);
1535		if (error)
1536			return ERR_PTR(error);
1537	}
1538
1539	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
1540	if (error)
1541		goto error_sec_opts;
1542
1543	/*
1544	 * Setup a dummy root and fs_info for test/set super.  This is because
1545	 * we don't actually fill this stuff out until open_ctree, but we need
1546	 * it for searching for existing supers, so this lets us do that and
1547	 * then open_ctree will properly initialize everything later.
1548	 */
1549	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1550	if (!fs_info) {
1551		error = -ENOMEM;
1552		goto error_sec_opts;
1553	}
1554
1555	fs_info->fs_devices = fs_devices;
1556
1557	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1558	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1559	security_init_mnt_opts(&fs_info->security_opts);
1560	if (!fs_info->super_copy || !fs_info->super_for_commit) {
1561		error = -ENOMEM;
1562		goto error_fs_info;
1563	}
1564
1565	error = btrfs_open_devices(fs_devices, mode, fs_type);
1566	if (error)
1567		goto error_fs_info;
1568
1569	if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1570		error = -EACCES;
1571		goto error_close_devices;
1572	}
1573
1574	bdev = fs_devices->latest_bdev;
1575	s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1576		 fs_info);
1577	if (IS_ERR(s)) {
1578		error = PTR_ERR(s);
1579		goto error_close_devices;
1580	}
1581
1582	if (s->s_root) {
1583		btrfs_close_devices(fs_devices);
1584		free_fs_info(fs_info);
1585		if ((flags ^ s->s_flags) & SB_RDONLY)
1586			error = -EBUSY;
1587	} else {
1588		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1589		btrfs_sb(s)->bdev_holder = fs_type;
1590		error = btrfs_fill_super(s, fs_devices, data);
1591	}
1592	if (error) {
1593		deactivate_locked_super(s);
1594		goto error_sec_opts;
1595	}
1596
1597	fs_info = btrfs_sb(s);
1598	error = setup_security_options(fs_info, s, &new_sec_opts);
1599	if (error) {
1600		deactivate_locked_super(s);
1601		goto error_sec_opts;
1602	}
1603
1604	return dget(s->s_root);
1605
1606error_close_devices:
1607	btrfs_close_devices(fs_devices);
1608error_fs_info:
1609	free_fs_info(fs_info);
1610error_sec_opts:
1611	security_free_mnt_opts(&new_sec_opts);
1612	return ERR_PTR(error);
1613}
1614
1615/*
1616 * Mount function which is called by VFS layer.
1617 *
1618 * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
1619 * which needs vfsmount* of device's root (/).  This means device's root has to
1620 * be mounted internally in any case.
1621 *
1622 * Operation flow:
1623 *   1. Parse subvol id related options for later use in mount_subvol().
1624 *
1625 *   2. Mount device's root (/) by calling vfs_kern_mount().
1626 *
1627 *      NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1628 *      first place. In order to avoid calling btrfs_mount() again, we use
1629 *      different file_system_type which is not registered to VFS by
1630 *      register_filesystem() (btrfs_root_fs_type). As a result,
1631 *      btrfs_mount_root() is called. The return value will be used by
1632 *      mount_subtree() in mount_subvol().
1633 *
1634 *   3. Call mount_subvol() to get the dentry of subvolume. Since there is
1635 *      "btrfs subvolume set-default", mount_subvol() is called always.
1636 */
1637static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1638		const char *device_name, void *data)
1639{
1640	struct vfsmount *mnt_root;
1641	struct dentry *root;
1642	fmode_t mode = FMODE_READ;
1643	char *subvol_name = NULL;
1644	u64 subvol_objectid = 0;
1645	int error = 0;
1646
1647	if (!(flags & SB_RDONLY))
1648		mode |= FMODE_WRITE;
1649
1650	error = btrfs_parse_subvol_options(data, mode,
1651					  &subvol_name, &subvol_objectid);
1652	if (error) {
1653		kfree(subvol_name);
1654		return ERR_PTR(error);
1655	}
1656
1657	/* mount device's root (/) */
1658	mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
1659	if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1660		if (flags & SB_RDONLY) {
1661			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1662				flags & ~SB_RDONLY, device_name, data);
1663		} else {
1664			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1665				flags | SB_RDONLY, device_name, data);
1666			if (IS_ERR(mnt_root)) {
1667				root = ERR_CAST(mnt_root);
1668				goto out;
1669			}
1670
1671			down_write(&mnt_root->mnt_sb->s_umount);
1672			error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1673			up_write(&mnt_root->mnt_sb->s_umount);
1674			if (error < 0) {
1675				root = ERR_PTR(error);
1676				mntput(mnt_root);
1677				goto out;
1678			}
1679		}
1680	}
1681	if (IS_ERR(mnt_root)) {
1682		root = ERR_CAST(mnt_root);
1683		goto out;
1684	}
1685
1686	/* mount_subvol() will free subvol_name and mnt_root */
1687	root = mount_subvol(subvol_name, subvol_objectid, device_name, mnt_root);
1688
1689out:
1690	return root;
1691}
1692
1693static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1694				     u32 new_pool_size, u32 old_pool_size)
1695{
1696	if (new_pool_size == old_pool_size)
1697		return;
1698
1699	fs_info->thread_pool_size = new_pool_size;
1700
1701	btrfs_info(fs_info, "resize thread pool %d -> %d",
1702	       old_pool_size, new_pool_size);
1703
1704	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1705	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1706	btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
1707	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1708	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
1709	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
1710	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
1711				new_pool_size);
1712	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1713	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1714	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1715	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
1716	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
1717				new_pool_size);
1718}
1719
1720static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
1721{
1722	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1723}
1724
1725static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1726				       unsigned long old_opts, int flags)
1727{
1728	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1729	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1730	     (flags & SB_RDONLY))) {
1731		/* wait for any defraggers to finish */
1732		wait_event(fs_info->transaction_wait,
1733			   (atomic_read(&fs_info->defrag_running) == 0));
1734		if (flags & SB_RDONLY)
1735			sync_filesystem(fs_info->sb);
1736	}
1737}
1738
1739static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1740					 unsigned long old_opts)
1741{
1742	/*
 1743	 * We need to clean up all defraggable inodes if autodefrag has been
 1744	 * turned off or the filesystem is now read-only.
1745	 */
1746	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1747	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1748		btrfs_cleanup_defrag_inodes(fs_info);
1749	}
1750
1751	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1752}
1753
1754static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1755{
1756	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1757	struct btrfs_root *root = fs_info->tree_root;
1758	unsigned old_flags = sb->s_flags;
1759	unsigned long old_opts = fs_info->mount_opt;
1760	unsigned long old_compress_type = fs_info->compress_type;
1761	u64 old_max_inline = fs_info->max_inline;
1762	u32 old_thread_pool_size = fs_info->thread_pool_size;
1763	u32 old_metadata_ratio = fs_info->metadata_ratio;
1764	int ret;
1765
1766	sync_filesystem(sb);
1767	btrfs_remount_prepare(fs_info);
1768
1769	if (data) {
1770		struct security_mnt_opts new_sec_opts;
1771
1772		security_init_mnt_opts(&new_sec_opts);
1773		ret = parse_security_options(data, &new_sec_opts);
1774		if (ret)
1775			goto restore;
1776		ret = setup_security_options(fs_info, sb,
1777					     &new_sec_opts);
1778		if (ret) {
1779			security_free_mnt_opts(&new_sec_opts);
1780			goto restore;
1781		}
1782	}
1783
1784	ret = btrfs_parse_options(fs_info, data, *flags);
1785	if (ret) {
1786		ret = -EINVAL;
1787		goto restore;
1788	}
1789
1790	btrfs_remount_begin(fs_info, old_opts, *flags);
1791	btrfs_resize_thread_pool(fs_info,
1792		fs_info->thread_pool_size, old_thread_pool_size);
1793
1794	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1795		goto out;
1796
1797	if (*flags & SB_RDONLY) {
1798		/*
1799		 * this also happens on 'umount -rf' or on shutdown, when
1800		 * the filesystem is busy.
1801		 */
1802		cancel_work_sync(&fs_info->async_reclaim_work);
1803
1804		/* wait for the uuid_scan task to finish */
1805		down(&fs_info->uuid_tree_rescan_sem);
1806		/* avoid complaints from lockdep et al. */
1807		up(&fs_info->uuid_tree_rescan_sem);
1808
1809		sb->s_flags |= SB_RDONLY;
1810
1811		/*
1812		 * Setting SB_RDONLY will put the cleaner thread to
1813		 * sleep at the next loop if it's already active.
1814		 * If it's already asleep, we'll leave unused block
1815		 * groups on disk until we're mounted read-write again
1816		 * unless we clean them up here.
1817		 */
1818		btrfs_delete_unused_bgs(fs_info);
1819
1820		btrfs_dev_replace_suspend_for_unmount(fs_info);
1821		btrfs_scrub_cancel(fs_info);
1822		btrfs_pause_balance(fs_info);
1823
1824		ret = btrfs_commit_super(fs_info);
1825		if (ret)
1826			goto restore;
1827	} else {
1828		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1829			btrfs_err(fs_info,
1830				"Remounting read-write after error is not allowed");
1831			ret = -EINVAL;
1832			goto restore;
1833		}
1834		if (fs_info->fs_devices->rw_devices == 0) {
1835			ret = -EACCES;
1836			goto restore;
1837		}
1838
1839		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
1840			btrfs_warn(fs_info,
1841				"too many missing devices, writeable remount is not allowed");
1842			ret = -EACCES;
1843			goto restore;
1844		}
1845
1846		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
 
 
1847			ret = -EINVAL;
1848			goto restore;
1849		}
1850
1851		ret = btrfs_cleanup_fs_roots(fs_info);
1852		if (ret)
1853			goto restore;
1854
1855		/* recover relocation */
1856		mutex_lock(&fs_info->cleaner_mutex);
1857		ret = btrfs_recover_relocation(root);
1858		mutex_unlock(&fs_info->cleaner_mutex);
1859		if (ret)
1860			goto restore;
 
1861
1862		ret = btrfs_resume_balance_async(fs_info);
1863		if (ret)
1864			goto restore;
1865
1866		ret = btrfs_resume_dev_replace_async(fs_info);
1867		if (ret) {
1868			btrfs_warn(fs_info, "failed to resume dev_replace");
1869			goto restore;
1870		}
1871
1872		btrfs_qgroup_rescan_resume(fs_info);
1873
1874		if (!fs_info->uuid_root) {
1875			btrfs_info(fs_info, "creating UUID tree");
1876			ret = btrfs_create_uuid_tree(fs_info);
1877			if (ret) {
1878				btrfs_warn(fs_info,
1879					   "failed to create the UUID tree %d",
1880					   ret);
1881				goto restore;
1882			}
1883		}
1884		sb->s_flags &= ~SB_RDONLY;
1885
1886		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
1887	}
1888out:
1889	wake_up_process(fs_info->transaction_kthread);
1890	btrfs_remount_cleanup(fs_info, old_opts);
1891	return 0;
1892
1893restore:
1894	/* We've hit an error - don't reset SB_RDONLY */
1895	if (sb_rdonly(sb))
1896		old_flags |= SB_RDONLY;
 
 
1897	sb->s_flags = old_flags;
1898	fs_info->mount_opt = old_opts;
1899	fs_info->compress_type = old_compress_type;
1900	fs_info->max_inline = old_max_inline;
1901	btrfs_resize_thread_pool(fs_info,
1902		old_thread_pool_size, fs_info->thread_pool_size);
1903	fs_info->metadata_ratio = old_metadata_ratio;
1904	btrfs_remount_cleanup(fs_info, old_opts);
 
 
1905	return ret;
1906}
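/*
 * Usage sketch (illustrative): 'mount -o remount,ro /mnt' takes the SB_RDONLY
 * branch above (cancel reclaim, commit, pause balance, suspend dev-replace),
 * while a read-write remount re-checks the error state and degradation
 * limits and then resumes balance, dev-replace and the qgroup rescan.
 */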
1907
1908/* Used to sort the devices by max_avail (descending sort) */
1909static int btrfs_cmp_device_free_bytes(const void *dev_info1,
1910				       const void *dev_info2)
1911{
1912	if (((struct btrfs_device_info *)dev_info1)->max_avail >
1913	    ((struct btrfs_device_info *)dev_info2)->max_avail)
1914		return -1;
1915	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
1916		 ((struct btrfs_device_info *)dev_info2)->max_avail)
1917		return 1;
1918	else
1919		return 0;
1920}
1921
1922/*
1923 * Sort the devices by max_avail, which stores the maximum free extent size
1924 * of each device (descending sort).
1925 */
1926static inline void btrfs_descending_sort_devices(
1927					struct btrfs_device_info *devices,
1928					size_t nr_devices)
1929{
1930	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
1931	     btrfs_cmp_device_free_bytes, NULL);
1932}
1933
1934/*
1935 * Helper to calculate the free space on the devices that can be used to store
1936 * file data.
1937 */
1938static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
1939				       u64 *free_bytes)
1940{
1941	struct btrfs_device_info *devices_info;
1942	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
1943	struct btrfs_device *device;
1944	u64 skip_space;
1945	u64 type;
1946	u64 avail_space;
1947	u64 min_stripe_size;
1948	int min_stripes = 1, num_stripes = 1;
1949	int i = 0, nr_devices;
 
1950
1951	/*
1952	 * We aren't under the device list lock, so this is racy-ish, but good
1953	 * enough for our purposes.
1954	 */
1955	nr_devices = fs_info->fs_devices->open_devices;
1956	if (!nr_devices) {
1957		smp_mb();
1958		nr_devices = fs_info->fs_devices->open_devices;
1959		ASSERT(nr_devices);
1960		if (!nr_devices) {
1961			*free_bytes = 0;
1962			return 0;
1963		}
1964	}
1965
1966	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1967			       GFP_KERNEL);
1968	if (!devices_info)
1969		return -ENOMEM;
1970
1971	/* calc min stripe number for data space allocation */
1972	type = btrfs_data_alloc_profile(fs_info);
1973	if (type & BTRFS_BLOCK_GROUP_RAID0) {
1974		min_stripes = 2;
 
1975		num_stripes = nr_devices;
1976	} else if (type & BTRFS_BLOCK_GROUP_RAID1) {
1977		min_stripes = 2;
1978		num_stripes = 2;
1979	} else if (type & BTRFS_BLOCK_GROUP_RAID10) {
1980		min_stripes = 4;
1981		num_stripes = 4;
1982	}
1983
1984	if (type & BTRFS_BLOCK_GROUP_DUP)
1985		min_stripe_size = 2 * BTRFS_STRIPE_LEN;
1986	else
1987		min_stripe_size = BTRFS_STRIPE_LEN;
1988
1989	rcu_read_lock();
1990	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1991		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
1992						&device->dev_state) ||
1993		    !device->bdev ||
1994		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
1995			continue;
1996
1997		if (i >= nr_devices)
1998			break;
1999
2000		avail_space = device->total_bytes - device->bytes_used;
2001
2002		/* align with stripe_len */
2003		avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
2004		avail_space *= BTRFS_STRIPE_LEN;
2005
2006		/*
2007		 * In order to avoid overwriting the superblock on the drive,
2008		 * btrfs starts at an offset of at least 1MB when doing chunk
2009		 * allocation.
2010		 */
2011		skip_space = SZ_1M;
2012
2013		/*
2014		 * We can't use the free space in [0, skip_space - 1], so subtract
2015		 * it from the total.
2016		 */
2017		if (avail_space && avail_space >= skip_space)
2018			avail_space -= skip_space;
2019		else
2020			avail_space = 0;
2021
2022		if (avail_space < min_stripe_size)
2023			continue;
2024
 
 
2025		devices_info[i].dev = device;
2026		devices_info[i].max_avail = avail_space;
2027
2028		i++;
2029	}
2030	rcu_read_unlock();
2031
2032	nr_devices = i;
2033
2034	btrfs_descending_sort_devices(devices_info, nr_devices);
2035
2036	i = nr_devices - 1;
2037	avail_space = 0;
2038	while (nr_devices >= min_stripes) {
2039		if (num_stripes > nr_devices)
2040			num_stripes = nr_devices;
2041
2042		if (devices_info[i].max_avail >= min_stripe_size) {
2043			int j;
2044			u64 alloc_size;
2045
2046			avail_space += devices_info[i].max_avail * num_stripes;
2047			alloc_size = devices_info[i].max_avail;
2048			for (j = i + 1 - num_stripes; j <= i; j++)
2049				devices_info[j].max_avail -= alloc_size;
2050		}
2051		i--;
2052		nr_devices--;
2053	}
2054
2055	kfree(devices_info);
2056	*free_bytes = avail_space;
2057	return 0;
2058}
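/*
 * Worked example (illustrative, not from the original source): with a RAID1
 * data profile (min_stripes = num_stripes = 2) and three devices whose
 * stripe-aligned free space is 10G, 8G and 3G, the loop above walks from the
 * smallest device upwards:
 *
 *   i = 2: avail_space += 3G * 2, remaining free space becomes 10G, 5G, 0
 *   i = 1: avail_space += 5G * 2, remaining free space becomes  5G, 0,  0
 *   i = 0: loop stops, fewer than min_stripes devices are left
 *
 * giving 16G of raw space; btrfs_statfs() divides that by the raid factor,
 * so roughly 8G ends up in f_bavail.  This is the close approximation
 * mentioned in the comment above btrfs_statfs().
 */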
2059
2060/*
2061 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2062 *
2063 * If there's a redundant raid level at DATA block groups, use the respective
2064 * multiplier to scale the sizes.
2065 *
2066 * Unused device space usage is based on simulating the chunk allocator
2067 * algorithm that respects the device sizes and order of allocations.  This is
2068 * a close approximation of the actual use but there are other factors that may
2069 * change the result (like a new metadata chunk).
2070 *
2071 * If metadata is exhausted, f_bavail will be 0.
2072 */
2073static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2074{
2075	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2076	struct btrfs_super_block *disk_super = fs_info->super_copy;
2077	struct list_head *head = &fs_info->space_info;
2078	struct btrfs_space_info *found;
2079	u64 total_used = 0;
2080	u64 total_free_data = 0;
2081	u64 total_free_meta = 0;
2082	int bits = dentry->d_sb->s_blocksize_bits;
2083	__be32 *fsid = (__be32 *)fs_info->fsid;
2084	unsigned factor = 1;
2085	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2086	int ret;
2087	u64 thresh = 0;
2088	int mixed = 0;
2089
2090	rcu_read_lock();
2091	list_for_each_entry_rcu(found, head, list) {
2092		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2093			int i;
2094
2095			total_free_data += found->disk_total - found->disk_used;
2096			total_free_data -=
2097				btrfs_account_ro_block_groups_free_space(found);
2098
2099			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2100				if (!list_empty(&found->block_groups[i])) {
2101					switch (i) {
2102					case BTRFS_RAID_DUP:
2103					case BTRFS_RAID_RAID1:
2104					case BTRFS_RAID_RAID10:
2105						factor = 2;
2106					}
2107				}
2108			}
2109		}
2110
2111		/*
2112		 * Metadata in mixed block group profiles is accounted in data
2113		 */
2114		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2115			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2116				mixed = 1;
2117			else
2118				total_free_meta += found->disk_total -
2119					found->disk_used;
2120		}
2121
2122		total_used += found->disk_used;
2123	}
2124
2125	rcu_read_unlock();
2126
2127	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2128	buf->f_blocks >>= bits;
2129	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2130
2131	/* Account global block reserve as used, it's in logical size already */
2132	spin_lock(&block_rsv->lock);
2133	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
2134	if (buf->f_bfree >= block_rsv->size >> bits)
2135		buf->f_bfree -= block_rsv->size >> bits;
2136	else
2137		buf->f_bfree = 0;
2138	spin_unlock(&block_rsv->lock);
2139
2140	buf->f_bavail = div_u64(total_free_data, factor);
2141	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2142	if (ret)
2143		return ret;
2144	buf->f_bavail += div_u64(total_free_data, factor);
2145	buf->f_bavail = buf->f_bavail >> bits;
2146
2147	/*
2148	 * We calculate the remaining metadata space minus global reserve. If
2149	 * this is (supposedly) smaller than zero, there's no space. But this
2150	 * does not hold in practice, the exhausted state happens when there's still
2151	 * some positive delta. So we apply some guesswork and compare the
2152	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
2153	 *
2154	 * We probably cannot calculate the exact threshold value because this
2155	 * depends on the internal reservations requested by various
2156	 * operations, so some operations that consume a small amount of metadata
2157	 * will succeed even if Avail is zero. But this is better than the other
2158	 * way around.
2159	 */
2160	thresh = SZ_4M;
2161
2162	if (!mixed && total_free_meta - thresh < block_rsv->size)
2163		buf->f_bavail = 0;
2164
2165	buf->f_type = BTRFS_SUPER_MAGIC;
2166	buf->f_bsize = dentry->d_sb->s_blocksize;
2167	buf->f_namelen = BTRFS_NAME_LEN;
2168
2169	/* We treat it as constant endianness (it doesn't matter _which_)
2170	   because we want the fsid to come out the same whether mounted
2171	   on a big-endian or little-endian host */
2172	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2173	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2174	/* Mask in the root object ID too, to disambiguate subvols */
2175	buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32;
2176	buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid;
 
 
2177
2178	return 0;
2179}
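/*
 * Illustrative note: the 16-byte filesystem UUID is folded into the two
 * 32-bit f_fsid words by XOR and the root objectid of the mounted subvolume
 * is XORed on top, so statfs() on different subvolumes of one filesystem
 * reports different but stable f_fsid values.
 */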
2180
2181static void btrfs_kill_super(struct super_block *sb)
2182{
2183	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2184	kill_anon_super(sb);
2185	free_fs_info(fs_info);
2186}
2187
2188static struct file_system_type btrfs_fs_type = {
2189	.owner		= THIS_MODULE,
2190	.name		= "btrfs",
2191	.mount		= btrfs_mount,
2192	.kill_sb	= btrfs_kill_super,
2193	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2194};
2195
2196static struct file_system_type btrfs_root_fs_type = {
2197	.owner		= THIS_MODULE,
2198	.name		= "btrfs",
2199	.mount		= btrfs_mount_root,
2200	.kill_sb	= btrfs_kill_super,
2201	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2202};
2203
2204MODULE_ALIAS_FS("btrfs");
2205
2206static int btrfs_control_open(struct inode *inode, struct file *file)
2207{
2208	/*
2209	 * The control file's private_data is not used here; clear it so an
2210	 * open always starts from a clean state.
2212	 */
2213	file->private_data = NULL;
2214	return 0;
2215}
2216
2217/*
2218 * used by btrfsctl to scan devices when no FS is mounted
2219 */
2220static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2221				unsigned long arg)
2222{
2223	struct btrfs_ioctl_vol_args *vol;
2224	struct btrfs_fs_devices *fs_devices;
2225	int ret = -ENOTTY;
2226
2227	if (!capable(CAP_SYS_ADMIN))
2228		return -EPERM;
2229
2230	vol = memdup_user((void __user *)arg, sizeof(*vol));
2231	if (IS_ERR(vol))
2232		return PTR_ERR(vol);
 
2233
2234	switch (cmd) {
2235	case BTRFS_IOC_SCAN_DEV:
2236		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
2237					    &btrfs_root_fs_type, &fs_devices);
2238		break;
2239	case BTRFS_IOC_DEVICES_READY:
2240		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
2241					    &btrfs_root_fs_type, &fs_devices);
2242		if (ret)
2243			break;
2244		ret = !(fs_devices->num_devices == fs_devices->total_devices);
2245		break;
2246	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2247		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2248		break;
2249	}
2250
2251	kfree(vol);
2252	return ret;
2253}
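/*
 * Usage sketch (illustrative): btrfs-progs opens /dev/btrfs-control before
 * any filesystem is mounted; 'btrfs device scan' issues BTRFS_IOC_SCAN_DEV
 * with the device path in vol->name, and mount helpers use
 * BTRFS_IOC_DEVICES_READY, where a return of 0 means all devices of a
 * multi-device filesystem have been seen.
 */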
2254
2255static int btrfs_freeze(struct super_block *sb)
2256{
2257	struct btrfs_trans_handle *trans;
2258	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2259	struct btrfs_root *root = fs_info->tree_root;
2260
2261	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2262	/*
2263	 * We don't need a barrier here, we'll wait for any transaction that
2264	 * could be in progress on other threads (and do delayed iputs that
2265	 * we want to avoid on a frozen filesystem), or do the commit
2266	 * ourselves.
2267	 */
2268	trans = btrfs_attach_transaction_barrier(root);
2269	if (IS_ERR(trans)) {
2270		/* no transaction, don't bother */
2271		if (PTR_ERR(trans) == -ENOENT)
2272			return 0;
2273		return PTR_ERR(trans);
2274	}
2275	return btrfs_commit_transaction(trans);
2276}
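/*
 * Illustrative note: ->freeze_fs/->unfreeze_fs are invoked by the VFS on
 * behalf of fsfreeze(8) (FIFREEZE/FITHAW).  The freeze path above commits
 * any running transaction through the barrier attach so the on-disk state
 * stays consistent for as long as the filesystem remains frozen.
 */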
2277
2278static int btrfs_unfreeze(struct super_block *sb)
2279{
2280	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2281
2282	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2283	return 0;
2284}
2285
2286static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2287{
2288	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2289	struct btrfs_fs_devices *cur_devices;
2290	struct btrfs_device *dev, *first_dev = NULL;
2291	struct list_head *head;
2292	struct rcu_string *name;
2293
2294	/*
2295	 * Lightweight locking of the devices. We should not need
2296	 * device_list_mutex here as we only read the device data and the list
2297	 * is protected by RCU.  Even if a device is deleted during the list
2298 * traversal, we'll get valid data; the freeing callback will wait at
2299 * least until the rcu_read_unlock.
2300	 */
2301	rcu_read_lock();
2302	cur_devices = fs_info->fs_devices;
2303	while (cur_devices) {
2304		head = &cur_devices->devices;
2305		list_for_each_entry_rcu(dev, head, dev_list) {
2306			if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2307				continue;
2308			if (!dev->name)
2309				continue;
2310			if (!first_dev || dev->devid < first_dev->devid)
2311				first_dev = dev;
2312		}
2313		cur_devices = cur_devices->seed;
2314	}
2315
2316	if (first_dev) {
2317		name = rcu_dereference(first_dev->name);
2318		seq_escape(m, name->str, " \t\n\\");
2319	} else {
2320		WARN_ON(1);
2321	}
2322	rcu_read_unlock();
2323	return 0;
2324}
2325
2326static const struct super_operations btrfs_super_ops = {
2327	.drop_inode	= btrfs_drop_inode,
2328	.evict_inode	= btrfs_evict_inode,
2329	.put_super	= btrfs_put_super,
2330	.sync_fs	= btrfs_sync_fs,
2331	.show_options	= btrfs_show_options,
2332	.show_devname	= btrfs_show_devname,
2333	.write_inode	= btrfs_write_inode,
2334	.alloc_inode	= btrfs_alloc_inode,
2335	.destroy_inode	= btrfs_destroy_inode,
 
2336	.statfs		= btrfs_statfs,
2337	.remount_fs	= btrfs_remount,
2338	.freeze_fs	= btrfs_freeze,
2339	.unfreeze_fs	= btrfs_unfreeze,
2340};
2341
2342static const struct file_operations btrfs_ctl_fops = {
2343	.open = btrfs_control_open,
2344	.unlocked_ioctl	 = btrfs_control_ioctl,
2345	.compat_ioctl = btrfs_control_ioctl,
2346	.owner	 = THIS_MODULE,
2347	.llseek = noop_llseek,
2348};
2349
2350static struct miscdevice btrfs_misc = {
2351	.minor		= BTRFS_MINOR,
2352	.name		= "btrfs-control",
2353	.fops		= &btrfs_ctl_fops
2354};
2355
2356MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2357MODULE_ALIAS("devname:btrfs-control");
2358
2359static int __init btrfs_interface_init(void)
2360{
2361	return misc_register(&btrfs_misc);
2362}
2363
2364static __cold void btrfs_interface_exit(void)
2365{
2366	misc_deregister(&btrfs_misc);
2367}
2368
2369static void __init btrfs_print_mod_info(void)
2370{
2371	pr_info("Btrfs loaded, crc32c=%s"
2372#ifdef CONFIG_BTRFS_DEBUG
2373			", debug=on"
2374#endif
2375#ifdef CONFIG_BTRFS_ASSERT
2376			", assert=on"
2377#endif
2378#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2379			", integrity-checker=on"
2380#endif
2381#ifdef CONFIG_BTRFS_FS_REF_VERIFY
2382			", ref-verify=on"
2383#endif
2384			"\n",
2385			crc32c_impl());
2386}
2387
2388static int __init init_btrfs_fs(void)
2389{
2390	int err;
2391
2392	btrfs_props_init();
2393
2394	err = btrfs_init_sysfs();
2395	if (err)
2396		return err;
2397
2398	btrfs_init_compress();
2399
2400	err = btrfs_init_cachep();
2401	if (err)
2402		goto free_compress;
2403
2404	err = extent_io_init();
2405	if (err)
2406		goto free_cachep;
2407
2408	err = extent_map_init();
2409	if (err)
2410		goto free_extent_io;
2411
2412	err = ordered_data_init();
2413	if (err)
2414		goto free_extent_map;
2415
2416	err = btrfs_delayed_inode_init();
2417	if (err)
2418		goto free_ordered_data;
2419
2420	err = btrfs_auto_defrag_init();
2421	if (err)
2422		goto free_delayed_inode;
2423
2424	err = btrfs_delayed_ref_init();
2425	if (err)
2426		goto free_auto_defrag;
2427
2428	err = btrfs_prelim_ref_init();
2429	if (err)
2430		goto free_delayed_ref;
2431
2432	err = btrfs_end_io_wq_init();
2433	if (err)
2434		goto free_prelim_ref;
2435
2436	err = btrfs_interface_init();
2437	if (err)
2438		goto free_end_io_wq;
2439
2440	btrfs_init_lockdep();
2441
2442	btrfs_print_mod_info();
2443
2444	err = btrfs_run_sanity_tests();
2445	if (err)
2446		goto unregister_ioctl;
2447
2448	err = register_filesystem(&btrfs_fs_type);
2449	if (err)
2450		goto unregister_ioctl;
2451
2452	return 0;
2453
2454unregister_ioctl:
2455	btrfs_interface_exit();
2456free_end_io_wq:
2457	btrfs_end_io_wq_exit();
2458free_prelim_ref:
2459	btrfs_prelim_ref_exit();
2460free_delayed_ref:
2461	btrfs_delayed_ref_exit();
2462free_auto_defrag:
2463	btrfs_auto_defrag_exit();
2464free_delayed_inode:
2465	btrfs_delayed_inode_exit();
2466free_ordered_data:
2467	ordered_data_exit();
2468free_extent_map:
2469	extent_map_exit();
 
 
2470free_extent_io:
2471	extent_io_exit();
2472free_cachep:
2473	btrfs_destroy_cachep();
2474free_compress:
2475	btrfs_exit_compress();
2476	btrfs_exit_sysfs();
2477
2478	return err;
2479}
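/*
 * Illustrative note: every init step above has a matching label in the
 * unwind ladder, so a failure at step N tears down steps N-1..1 in reverse
 * order before returning the error.
 */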
2480
2481static void __exit exit_btrfs_fs(void)
2482{
2483	btrfs_destroy_cachep();
2484	btrfs_delayed_ref_exit();
2485	btrfs_auto_defrag_exit();
2486	btrfs_delayed_inode_exit();
2487	btrfs_prelim_ref_exit();
2488	ordered_data_exit();
2489	extent_map_exit();
 
2490	extent_io_exit();
2491	btrfs_interface_exit();
2492	btrfs_end_io_wq_exit();
2493	unregister_filesystem(&btrfs_fs_type);
2494	btrfs_exit_sysfs();
2495	btrfs_cleanup_fs_uuids();
2496	btrfs_exit_compress();
2497}
2498
2499late_initcall(init_btrfs_fs);
2500module_exit(exit_btrfs_fs)
2501
2502MODULE_LICENSE("GPL");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/module.h>
 
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/highmem.h>
  11#include <linux/time.h>
  12#include <linux/init.h>
  13#include <linux/seq_file.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/mount.h>
 
 
  17#include <linux/writeback.h>
  18#include <linux/statfs.h>
  19#include <linux/compat.h>
  20#include <linux/parser.h>
  21#include <linux/ctype.h>
  22#include <linux/namei.h>
  23#include <linux/miscdevice.h>
  24#include <linux/magic.h>
  25#include <linux/slab.h>
  26#include <linux/cleancache.h>
  27#include <linux/ratelimit.h>
  28#include <linux/crc32c.h>
  29#include <linux/btrfs.h>
  30#include "delayed-inode.h"
  31#include "ctree.h"
  32#include "disk-io.h"
  33#include "transaction.h"
  34#include "btrfs_inode.h"
  35#include "print-tree.h"
  36#include "props.h"
  37#include "xattr.h"
  38#include "volumes.h"
  39#include "export.h"
  40#include "compression.h"
  41#include "rcu-string.h"
  42#include "dev-replace.h"
  43#include "free-space-cache.h"
  44#include "backref.h"
  45#include "space-info.h"
  46#include "sysfs.h"
  47#include "zoned.h"
  48#include "tests/btrfs-tests.h"
  49#include "block-group.h"
  50#include "discard.h"
  51#include "qgroup.h"
  52#define CREATE_TRACE_POINTS
  53#include <trace/events/btrfs.h>
  54
  55static const struct super_operations btrfs_super_ops;
  56
  57/*
  58 * Types for mounting the default subvolume and a subvolume explicitly
  59 * requested by subvol=/path. That way the callchain is straightforward and we
  60 * don't have to play tricks with the mount options and recursive calls to
  61 * btrfs_mount.
  62 *
  63 * The new btrfs_root_fs_type also servers as a tag for the bdev_holder.
  64 */
  65static struct file_system_type btrfs_fs_type;
  66static struct file_system_type btrfs_root_fs_type;
  67
  68static int btrfs_remount(struct super_block *sb, int *flags, char *data);
  69
  70/*
  71 * Generally the error codes correspond to their respective errors, but there
  72 * are a few special cases.
  73 *
  74 * EUCLEAN: Any sort of corruption that we encounter.  The tree-checker for
  75 *          instance will return EUCLEAN if any of the blocks are corrupted in
  76 *          a way that is problematic.  We want to reserve EUCLEAN for these
  77 *          sort of corruptions.
  78 *
  79 * EROFS: If we check BTRFS_FS_STATE_ERROR and fail out with a return error, we
  80 *        need to use EROFS for this case.  We will have no idea of the
  81 *        original failure, that will have been reported at the time we tripped
  82 *        over the error.  Each subsequent error that doesn't have any context
  83 *        of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
  84 */
  85const char * __attribute_const__ btrfs_decode_error(int errno)
  86{
  87	char *errstr = "unknown";
  88
  89	switch (errno) {
  90	case -ENOENT:		/* -2 */
  91		errstr = "No such entry";
  92		break;
  93	case -EIO:		/* -5 */
  94		errstr = "IO failure";
  95		break;
  96	case -ENOMEM:		/* -12*/
  97		errstr = "Out of memory";
  98		break;
  99	case -EEXIST:		/* -17 */
 100		errstr = "Object already exists";
 101		break;
 102	case -ENOSPC:		/* -28 */
 103		errstr = "No space left";
 104		break;
 105	case -EROFS:		/* -30 */
 106		errstr = "Readonly filesystem";
 107		break;
 108	case -EOPNOTSUPP:	/* -95 */
 109		errstr = "Operation not supported";
 110		break;
 111	case -EUCLEAN:		/* -117 */
 112		errstr = "Filesystem corrupted";
 113		break;
 114	case -EDQUOT:		/* -122 */
 115		errstr = "Quota exceeded";
 116		break;
 117	}
 118
 119	return errstr;
 120}
 121
 122/*
 123 * __btrfs_handle_fs_error decodes expected errors from the caller and
 124 * invokes the appropriate error response.
 125 */
 126__cold
 127void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
 128		       unsigned int line, int errno, const char *fmt, ...)
 129{
 130	struct super_block *sb = fs_info->sb;
 131#ifdef CONFIG_PRINTK
 132	const char *errstr;
 133#endif
 134
 135	/*
 136	 * Special case: if the error is EROFS, and we're already
 137	 * under SB_RDONLY, then it is safe here.
 138	 */
 139	if (errno == -EROFS && sb_rdonly(sb))
 140  		return;
 141
 142#ifdef CONFIG_PRINTK
 143	errstr = btrfs_decode_error(errno);
 144	if (fmt) {
 145		struct va_format vaf;
 146		va_list args;
 147
 148		va_start(args, fmt);
 149		vaf.fmt = fmt;
 150		vaf.va = &args;
 151
 152		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n",
 153			sb->s_id, function, line, errno, errstr, &vaf);
 154		va_end(args);
 155	} else {
 156		pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n",
 157			sb->s_id, function, line, errno, errstr);
 158	}
 159#endif
 160
 161	/*
 162	 * Today we only save the error info to memory.  Long term we'll
 163	 * also send it down to the disk
 164	 */
 165	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 166
 167	/* Don't go through full error handling during mount */
 168	if (!(sb->s_flags & SB_BORN))
 169		return;
 170
 171	if (sb_rdonly(sb))
 172		return;
 173
 174	btrfs_discard_stop(fs_info);
 175
 176	/* btrfs handle error by forcing the filesystem readonly */
 177	btrfs_set_sb_rdonly(sb);
 178	btrfs_info(fs_info, "forced readonly");
 179	/*
 180	 * Note that a running device replace operation is not canceled here
 181	 * although there is no way to update the progress. It would add the
 182	 * risk of a deadlock, therefore the canceling is omitted. The only
 183	 * penalty is that some I/O remains active until the procedure
 184	 * completes. The next time when the filesystem is mounted writable
 185	 * again, the device replace operation continues.
 186	 */
 187}
 188
 189#ifdef CONFIG_PRINTK
 190static const char * const logtypes[] = {
 191	"emergency",
 192	"alert",
 193	"critical",
 194	"error",
 195	"warning",
 196	"notice",
 197	"info",
 198	"debug",
 199};
 200
 201
 202/*
 203 * Use one ratelimit state per log level so that a flood of less important
 204 * messages doesn't cause more important ones to be dropped.
 205 */
 206static struct ratelimit_state printk_limits[] = {
 207	RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
 208	RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
 209	RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
 210	RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
 211	RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
 212	RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
 213	RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
 214	RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
 215};
 216
 217void __cold btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 218{
 219	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
 220	struct va_format vaf;
 221	va_list args;
 222	int kern_level;
 223	const char *type = logtypes[4];
 224	struct ratelimit_state *ratelimit = &printk_limits[4];
 225
 226	va_start(args, fmt);
 227
 228	while ((kern_level = printk_get_level(fmt)) != 0) {
 229		size_t size = printk_skip_level(fmt) - fmt;
 230
 231		if (kern_level >= '0' && kern_level <= '7') {
 232			memcpy(lvl, fmt,  size);
 233			lvl[size] = '\0';
 234			type = logtypes[kern_level - '0'];
 235			ratelimit = &printk_limits[kern_level - '0'];
 236		}
 237		fmt += size;
 238	}
 239
 240	vaf.fmt = fmt;
 241	vaf.va = &args;
 242
 243	if (__ratelimit(ratelimit)) {
 244		if (fs_info)
 245			printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
 246				fs_info->sb->s_id, &vaf);
 247		else
 248			printk("%sBTRFS %s: %pV\n", lvl, type, &vaf);
 249	}
 250
 251	va_end(args);
 252}
 253#endif
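/*
 * Illustrative note: the btrfs_err()/btrfs_warn()/btrfs_info() helpers used
 * throughout this file are thin macros that prepend a printk level to the
 * format string and call btrfs_printk(), so the level parsed by
 * printk_get_level() above selects both the log type string and the
 * per-level ratelimit state.
 */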
 254
 255#if BITS_PER_LONG == 32
 256void __cold btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info)
 257{
 258	if (!test_and_set_bit(BTRFS_FS_32BIT_WARN, &fs_info->flags)) {
 259		btrfs_warn(fs_info, "reaching 32bit limit for logical addresses");
 260		btrfs_warn(fs_info,
 261"due to page cache limit on 32bit systems, btrfs can't access metadata at or beyond %lluT",
 262			   BTRFS_32BIT_MAX_FILE_SIZE >> 40);
 263		btrfs_warn(fs_info,
 264			   "please consider upgrading to 64bit kernel/hardware");
 265	}
 266}
 267
 268void __cold btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info)
 269{
 270	if (!test_and_set_bit(BTRFS_FS_32BIT_ERROR, &fs_info->flags)) {
 271		btrfs_err(fs_info, "reached 32bit limit for logical addresses");
 272		btrfs_err(fs_info,
 273"due to page cache limit on 32bit systems, metadata beyond %lluT can't be accessed",
 274			  BTRFS_32BIT_MAX_FILE_SIZE >> 40);
 275		btrfs_err(fs_info,
 276			   "please consider upgrading to 64bit kernel/hardware");
 277	}
 278}
 279#endif
 280
 281/*
 282 * We only mark the transaction aborted and then set the file system read-only.
 283 * This will prevent new transactions from starting or trying to join this
 284 * one.
 285 *
 286 * This means that error recovery at the call site is limited to freeing
 287 * any local memory allocations and passing the error code up without
 288 * further cleanup. The transaction should complete as it normally would
 289 * in the call path but will return -EIO.
 290 *
 291 * We'll complete the cleanup in btrfs_end_transaction and
 292 * btrfs_commit_transaction.
 293 */
 294__cold
 295void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 296			       const char *function,
 297			       unsigned int line, int errno)
 298{
 299	struct btrfs_fs_info *fs_info = trans->fs_info;
 300
 301	WRITE_ONCE(trans->aborted, errno);
 302	WRITE_ONCE(trans->transaction->aborted, errno);
 303	/* Wake up anybody who may be waiting on this transaction */
 304	wake_up(&fs_info->transaction_wait);
 305	wake_up(&fs_info->transaction_blocked_wait);
 306	__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
 307}
 308/*
 309 * __btrfs_panic decodes unexpected, fatal errors from the caller,
 310 * issues an alert, and either panics or BUGs, depending on mount options.
 311 */
 312__cold
 313void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 314		   unsigned int line, int errno, const char *fmt, ...)
 315{
 316	char *s_id = "<unknown>";
 317	const char *errstr;
 318	struct va_format vaf = { .fmt = fmt };
 319	va_list args;
 320
 321	if (fs_info)
 322		s_id = fs_info->sb->s_id;
 323
 324	va_start(args, fmt);
 325	vaf.va = &args;
 326
 327	errstr = btrfs_decode_error(errno);
 328	if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
 329		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
 330			s_id, function, line, &vaf, errno, errstr);
 331
 332	btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
 333		   function, line, &vaf, errno, errstr);
 334	va_end(args);
 335	/* Caller calls BUG() */
 336}
 337
 338static void btrfs_put_super(struct super_block *sb)
 339{
 340	close_ctree(btrfs_sb(sb));
 341}
 342
 343enum {
 344	Opt_acl, Opt_noacl,
 345	Opt_clear_cache,
 346	Opt_commit_interval,
 347	Opt_compress,
 348	Opt_compress_force,
 349	Opt_compress_force_type,
 350	Opt_compress_type,
 351	Opt_degraded,
 352	Opt_device,
 353	Opt_fatal_errors,
 354	Opt_flushoncommit, Opt_noflushoncommit,
 
 355	Opt_max_inline,
 356	Opt_barrier, Opt_nobarrier,
 357	Opt_datacow, Opt_nodatacow,
 358	Opt_datasum, Opt_nodatasum,
 359	Opt_defrag, Opt_nodefrag,
 360	Opt_discard, Opt_nodiscard,
 361	Opt_discard_mode,
 362	Opt_norecovery,
 363	Opt_ratio,
 364	Opt_rescan_uuid_tree,
 365	Opt_skip_balance,
 366	Opt_space_cache, Opt_no_space_cache,
 367	Opt_space_cache_version,
 368	Opt_ssd, Opt_nossd,
 369	Opt_ssd_spread, Opt_nossd_spread,
 370	Opt_subvol,
 371	Opt_subvol_empty,
 372	Opt_subvolid,
 373	Opt_thread_pool,
 374	Opt_treelog, Opt_notreelog,
 
 375	Opt_user_subvol_rm_allowed,
 376
 377	/* Rescue options */
 378	Opt_rescue,
 379	Opt_usebackuproot,
 380	Opt_nologreplay,
 381	Opt_ignorebadroots,
 382	Opt_ignoredatacsums,
 383	Opt_rescue_all,
 384
 385	/* Deprecated options */
 
 386	Opt_recovery,
 387	Opt_inode_cache, Opt_noinode_cache,
 388
 389	/* Debugging options */
 390	Opt_check_integrity,
 391	Opt_check_integrity_including_extent_data,
 392	Opt_check_integrity_print_mask,
 393	Opt_enospc_debug, Opt_noenospc_debug,
 394#ifdef CONFIG_BTRFS_DEBUG
 395	Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
 396#endif
 397#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 398	Opt_ref_verify,
 399#endif
 400	Opt_err,
 401};
 402
 403static const match_table_t tokens = {
 404	{Opt_acl, "acl"},
 405	{Opt_noacl, "noacl"},
 406	{Opt_clear_cache, "clear_cache"},
 407	{Opt_commit_interval, "commit=%u"},
 408	{Opt_compress, "compress"},
 409	{Opt_compress_type, "compress=%s"},
 410	{Opt_compress_force, "compress-force"},
 411	{Opt_compress_force_type, "compress-force=%s"},
 412	{Opt_degraded, "degraded"},
 413	{Opt_device, "device=%s"},
 414	{Opt_fatal_errors, "fatal_errors=%s"},
 415	{Opt_flushoncommit, "flushoncommit"},
 416	{Opt_noflushoncommit, "noflushoncommit"},
 417	{Opt_inode_cache, "inode_cache"},
 418	{Opt_noinode_cache, "noinode_cache"},
 419	{Opt_max_inline, "max_inline=%s"},
 420	{Opt_barrier, "barrier"},
 421	{Opt_nobarrier, "nobarrier"},
 422	{Opt_datacow, "datacow"},
 423	{Opt_nodatacow, "nodatacow"},
 424	{Opt_datasum, "datasum"},
 425	{Opt_nodatasum, "nodatasum"},
 426	{Opt_defrag, "autodefrag"},
 427	{Opt_nodefrag, "noautodefrag"},
 428	{Opt_discard, "discard"},
 429	{Opt_discard_mode, "discard=%s"},
 430	{Opt_nodiscard, "nodiscard"},
 
 431	{Opt_norecovery, "norecovery"},
 432	{Opt_ratio, "metadata_ratio=%u"},
 433	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 434	{Opt_skip_balance, "skip_balance"},
 435	{Opt_space_cache, "space_cache"},
 436	{Opt_no_space_cache, "nospace_cache"},
 437	{Opt_space_cache_version, "space_cache=%s"},
 438	{Opt_ssd, "ssd"},
 439	{Opt_nossd, "nossd"},
 440	{Opt_ssd_spread, "ssd_spread"},
 441	{Opt_nossd_spread, "nossd_spread"},
 442	{Opt_subvol, "subvol=%s"},
 443	{Opt_subvol_empty, "subvol="},
 444	{Opt_subvolid, "subvolid=%s"},
 445	{Opt_thread_pool, "thread_pool=%u"},
 446	{Opt_treelog, "treelog"},
 447	{Opt_notreelog, "notreelog"},
 
 448	{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
 449
 450	/* Rescue options */
 451	{Opt_rescue, "rescue=%s"},
 452	/* Deprecated, with alias rescue=nologreplay */
 453	{Opt_nologreplay, "nologreplay"},
 454	/* Deprecated, with alias rescue=usebackuproot */
 455	{Opt_usebackuproot, "usebackuproot"},
 456
 457	/* Deprecated options */
 
 458	{Opt_recovery, "recovery"},
 
 459
 460	/* Debugging options */
 461	{Opt_check_integrity, "check_int"},
 462	{Opt_check_integrity_including_extent_data, "check_int_data"},
 463	{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
 464	{Opt_enospc_debug, "enospc_debug"},
 465	{Opt_noenospc_debug, "noenospc_debug"},
 466#ifdef CONFIG_BTRFS_DEBUG
 467	{Opt_fragment_data, "fragment=data"},
 468	{Opt_fragment_metadata, "fragment=metadata"},
 469	{Opt_fragment_all, "fragment=all"},
 470#endif
 471#ifdef CONFIG_BTRFS_FS_REF_VERIFY
 472	{Opt_ref_verify, "ref_verify"},
 473#endif
 474	{Opt_err, NULL},
 475};
 476
 477static const match_table_t rescue_tokens = {
 478	{Opt_usebackuproot, "usebackuproot"},
 479	{Opt_nologreplay, "nologreplay"},
 480	{Opt_ignorebadroots, "ignorebadroots"},
 481	{Opt_ignorebadroots, "ibadroots"},
 482	{Opt_ignoredatacsums, "ignoredatacsums"},
 483	{Opt_ignoredatacsums, "idatacsums"},
 484	{Opt_rescue_all, "all"},
 485	{Opt_err, NULL},
 486};
 487
 488static bool check_ro_option(struct btrfs_fs_info *fs_info, unsigned long opt,
 489			    const char *opt_name)
 490{
 491	if (fs_info->mount_opt & opt) {
 492		btrfs_err(fs_info, "%s must be used with ro mount option",
 493			  opt_name);
 494		return true;
 495	}
 496	return false;
 497}
 498
 499static int parse_rescue_options(struct btrfs_fs_info *info, const char *options)
 500{
 501	char *opts;
 502	char *orig;
 503	char *p;
 504	substring_t args[MAX_OPT_ARGS];
 505	int ret = 0;
 506
 507	opts = kstrdup(options, GFP_KERNEL);
 508	if (!opts)
 509		return -ENOMEM;
 510	orig = opts;
 511
 512	while ((p = strsep(&opts, ":")) != NULL) {
 513		int token;
 514
 515		if (!*p)
 516			continue;
 517		token = match_token(p, rescue_tokens, args);
 518		switch (token){
 519		case Opt_usebackuproot:
 520			btrfs_info(info,
 521				   "trying to use backup root at mount time");
 522			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 523			break;
 524		case Opt_nologreplay:
 525			btrfs_set_and_info(info, NOLOGREPLAY,
 526					   "disabling log replay at mount time");
 527			break;
 528		case Opt_ignorebadroots:
 529			btrfs_set_and_info(info, IGNOREBADROOTS,
 530					   "ignoring bad roots");
 531			break;
 532		case Opt_ignoredatacsums:
 533			btrfs_set_and_info(info, IGNOREDATACSUMS,
 534					   "ignoring data csums");
 535			break;
 536		case Opt_rescue_all:
 537			btrfs_info(info, "enabling all of the rescue options");
 538			btrfs_set_and_info(info, IGNOREDATACSUMS,
 539					   "ignoring data csums");
 540			btrfs_set_and_info(info, IGNOREBADROOTS,
 541					   "ignoring bad roots");
 542			btrfs_set_and_info(info, NOLOGREPLAY,
 543					   "disabling log replay at mount time");
 544			break;
 545		case Opt_err:
 546			btrfs_info(info, "unrecognized rescue option '%s'", p);
 547			ret = -EINVAL;
 548			goto out;
 549		default:
 550			break;
 551		}
 552
 553	}
 554out:
 555	kfree(orig);
 556	return ret;
 557}
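/*
 * Usage sketch (illustrative): several rescue options can be combined with
 * ':' because strsep() above splits on that character, e.g.
 *
 *   mount -o ro,rescue=nologreplay:ibadroots /dev/sdb /mnt
 *
 * The ro is required since these options are rejected for read-write mounts
 * by check_ro_option() in btrfs_parse_options().
 */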
 558
 559/*
 560 * Regular mount options parser.  Everything that is needed only when
 561 * reading in a new superblock is parsed here.
 562 * XXX JDM: This needs to be cleaned up for remount.
 563 */
 564int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 565			unsigned long new_flags)
 566{
 567	substring_t args[MAX_OPT_ARGS];
 568	char *p, *num;
 
 569	int intarg;
 570	int ret = 0;
 571	char *compress_type;
 572	bool compress_force = false;
 573	enum btrfs_compression_type saved_compress_type;
 574	int saved_compress_level;
 575	bool saved_compress_force;
 576	int no_compress = 0;
 577
 
 578	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
 579		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
 580	else if (btrfs_free_space_cache_v1_active(info)) {
 581		if (btrfs_is_zoned(info)) {
 582			btrfs_info(info,
 583			"zoned: clearing existing space cache");
 584			btrfs_set_super_cache_generation(info->super_copy, 0);
 585		} else {
 586			btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 587		}
 588	}
 589
 590	/*
  591	 * Even if the options are empty, we still need to do an extra check
  592	 * against the new flags
 593	 */
 594	if (!options)
 595		goto check;
 596
 597	while ((p = strsep(&options, ",")) != NULL) {
 598		int token;
 599		if (!*p)
 600			continue;
 601
 602		token = match_token(p, tokens, args);
 603		switch (token) {
 604		case Opt_degraded:
 605			btrfs_info(info, "allowing degraded mounts");
 606			btrfs_set_opt(info->mount_opt, DEGRADED);
 607			break;
 608		case Opt_subvol:
 609		case Opt_subvol_empty:
 610		case Opt_subvolid:
 
 611		case Opt_device:
 612			/*
 613			 * These are parsed by btrfs_parse_subvol_options or
 614			 * btrfs_parse_device_options and can be ignored here.
 
 615			 */
 616			break;
 617		case Opt_nodatasum:
 618			btrfs_set_and_info(info, NODATASUM,
 619					   "setting nodatasum");
 620			break;
 621		case Opt_datasum:
 622			if (btrfs_test_opt(info, NODATASUM)) {
 623				if (btrfs_test_opt(info, NODATACOW))
 624					btrfs_info(info,
 625						   "setting datasum, datacow enabled");
 626				else
 627					btrfs_info(info, "setting datasum");
 628			}
 629			btrfs_clear_opt(info->mount_opt, NODATACOW);
 630			btrfs_clear_opt(info->mount_opt, NODATASUM);
 631			break;
 632		case Opt_nodatacow:
 633			if (!btrfs_test_opt(info, NODATACOW)) {
 634				if (!btrfs_test_opt(info, COMPRESS) ||
 635				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
 636					btrfs_info(info,
 637						   "setting nodatacow, compression disabled");
 638				} else {
 639					btrfs_info(info, "setting nodatacow");
 640				}
 641			}
 642			btrfs_clear_opt(info->mount_opt, COMPRESS);
 643			btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 644			btrfs_set_opt(info->mount_opt, NODATACOW);
 645			btrfs_set_opt(info->mount_opt, NODATASUM);
 646			break;
 647		case Opt_datacow:
 648			btrfs_clear_and_info(info, NODATACOW,
 649					     "setting datacow");
 650			break;
 651		case Opt_compress_force:
 652		case Opt_compress_force_type:
 653			compress_force = true;
 654			fallthrough;
 655		case Opt_compress:
 656		case Opt_compress_type:
 657			saved_compress_type = btrfs_test_opt(info,
 658							     COMPRESS) ?
 659				info->compress_type : BTRFS_COMPRESS_NONE;
 660			saved_compress_force =
 661				btrfs_test_opt(info, FORCE_COMPRESS);
 662			saved_compress_level = info->compress_level;
 663			if (token == Opt_compress ||
 664			    token == Opt_compress_force ||
 665			    strncmp(args[0].from, "zlib", 4) == 0) {
 666				compress_type = "zlib";
 667
 668				info->compress_type = BTRFS_COMPRESS_ZLIB;
 669				info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
 670				/*
 671				 * args[0] contains uninitialized data since
 672				 * for these tokens we don't expect any
 673				 * parameter.
 674				 */
 675				if (token != Opt_compress &&
 676				    token != Opt_compress_force)
 677					info->compress_level =
 678					  btrfs_compress_str2level(
 679							BTRFS_COMPRESS_ZLIB,
 680							args[0].from + 4);
 681				btrfs_set_opt(info->mount_opt, COMPRESS);
 682				btrfs_clear_opt(info->mount_opt, NODATACOW);
 683				btrfs_clear_opt(info->mount_opt, NODATASUM);
 684				no_compress = 0;
 685			} else if (strncmp(args[0].from, "lzo", 3) == 0) {
 686				compress_type = "lzo";
 687				info->compress_type = BTRFS_COMPRESS_LZO;
 688				info->compress_level = 0;
 689				btrfs_set_opt(info->mount_opt, COMPRESS);
 690				btrfs_clear_opt(info->mount_opt, NODATACOW);
 691				btrfs_clear_opt(info->mount_opt, NODATASUM);
 692				btrfs_set_fs_incompat(info, COMPRESS_LZO);
 693				no_compress = 0;
 694			} else if (strncmp(args[0].from, "zstd", 4) == 0) {
 695				compress_type = "zstd";
 696				info->compress_type = BTRFS_COMPRESS_ZSTD;
 697				info->compress_level =
 698					btrfs_compress_str2level(
 699							 BTRFS_COMPRESS_ZSTD,
 700							 args[0].from + 4);
 701				btrfs_set_opt(info->mount_opt, COMPRESS);
 702				btrfs_clear_opt(info->mount_opt, NODATACOW);
 703				btrfs_clear_opt(info->mount_opt, NODATASUM);
 704				btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
 705				no_compress = 0;
 706			} else if (strncmp(args[0].from, "no", 2) == 0) {
 707				compress_type = "no";
 708				info->compress_level = 0;
 709				info->compress_type = 0;
 710				btrfs_clear_opt(info->mount_opt, COMPRESS);
 711				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 712				compress_force = false;
 713				no_compress++;
 714			} else {
 715				ret = -EINVAL;
 716				goto out;
 717			}
 718
 719			if (compress_force) {
 720				btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
 721			} else {
 722				/*
 723				 * If we remount from compress-force=xxx to
  724				 * compress=xxx, we need to clear the FORCE_COMPRESS
  725				 * flag, otherwise there is no way for users
 726				 * to disable forcible compression separately.
 727				 */
 728				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 729			}
 730			if (no_compress == 1) {
 731				btrfs_info(info, "use no compression");
 732			} else if ((info->compress_type != saved_compress_type) ||
 733				   (compress_force != saved_compress_force) ||
 734				   (info->compress_level != saved_compress_level)) {
 735				btrfs_info(info, "%s %s compression, level %d",
 736					   (compress_force) ? "force" : "use",
 737					   compress_type, info->compress_level);
 738			}
 739			compress_force = false;
 740			break;
 741		case Opt_ssd:
 742			btrfs_set_and_info(info, SSD,
 743					   "enabling ssd optimizations");
 744			btrfs_clear_opt(info->mount_opt, NOSSD);
 745			break;
 746		case Opt_ssd_spread:
 747			btrfs_set_and_info(info, SSD,
 748					   "enabling ssd optimizations");
 749			btrfs_set_and_info(info, SSD_SPREAD,
 750					   "using spread ssd allocation scheme");
 751			btrfs_clear_opt(info->mount_opt, NOSSD);
 752			break;
 753		case Opt_nossd:
 754			btrfs_set_opt(info->mount_opt, NOSSD);
 755			btrfs_clear_and_info(info, SSD,
 756					     "not using ssd optimizations");
 757			fallthrough;
 758		case Opt_nossd_spread:
 759			btrfs_clear_and_info(info, SSD_SPREAD,
 760					     "not using spread ssd allocation scheme");
 761			break;
 762		case Opt_barrier:
 763			btrfs_clear_and_info(info, NOBARRIER,
 764					     "turning on barriers");
 765			break;
 766		case Opt_nobarrier:
 767			btrfs_set_and_info(info, NOBARRIER,
 768					   "turning off barriers");
 769			break;
 770		case Opt_thread_pool:
 771			ret = match_int(&args[0], &intarg);
 772			if (ret) {
 773				goto out;
 774			} else if (intarg == 0) {
 775				ret = -EINVAL;
 776				goto out;
 777			}
 778			info->thread_pool_size = intarg;
 779			break;
 780		case Opt_max_inline:
 781			num = match_strdup(&args[0]);
 782			if (num) {
 783				info->max_inline = memparse(num, NULL);
 784				kfree(num);
 785
 786				if (info->max_inline) {
 787					info->max_inline = min_t(u64,
 788						info->max_inline,
 789						info->sectorsize);
 790				}
 791				btrfs_info(info, "max_inline at %llu",
 792					   info->max_inline);
 793			} else {
 794				ret = -ENOMEM;
 795				goto out;
 796			}
 797			break;
 798		case Opt_acl:
 799#ifdef CONFIG_BTRFS_FS_POSIX_ACL
 800			info->sb->s_flags |= SB_POSIXACL;
 801			break;
 802#else
 803			btrfs_err(info, "support for ACL not compiled in!");
 804			ret = -EINVAL;
 805			goto out;
 806#endif
 807		case Opt_noacl:
 808			info->sb->s_flags &= ~SB_POSIXACL;
 809			break;
 810		case Opt_notreelog:
 811			btrfs_set_and_info(info, NOTREELOG,
 812					   "disabling tree log");
 813			break;
 814		case Opt_treelog:
 815			btrfs_clear_and_info(info, NOTREELOG,
 816					     "enabling tree log");
 817			break;
 818		case Opt_norecovery:
 819		case Opt_nologreplay:
 820			btrfs_warn(info,
 821		"'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
 822			btrfs_set_and_info(info, NOLOGREPLAY,
 823					   "disabling log replay at mount time");
 824			break;
 825		case Opt_flushoncommit:
 826			btrfs_set_and_info(info, FLUSHONCOMMIT,
 827					   "turning on flush-on-commit");
 828			break;
 829		case Opt_noflushoncommit:
 830			btrfs_clear_and_info(info, FLUSHONCOMMIT,
 831					     "turning off flush-on-commit");
 832			break;
 833		case Opt_ratio:
 834			ret = match_int(&args[0], &intarg);
 835			if (ret)
 836				goto out;
 837			info->metadata_ratio = intarg;
 838			btrfs_info(info, "metadata ratio %u",
 839				   info->metadata_ratio);
 840			break;
 841		case Opt_discard:
 842		case Opt_discard_mode:
 843			if (token == Opt_discard ||
 844			    strcmp(args[0].from, "sync") == 0) {
 845				btrfs_clear_opt(info->mount_opt, DISCARD_ASYNC);
 846				btrfs_set_and_info(info, DISCARD_SYNC,
 847						   "turning on sync discard");
 848			} else if (strcmp(args[0].from, "async") == 0) {
 849				btrfs_clear_opt(info->mount_opt, DISCARD_SYNC);
 850				btrfs_set_and_info(info, DISCARD_ASYNC,
 851						   "turning on async discard");
 852			} else {
 853				ret = -EINVAL;
 854				goto out;
 855			}
 856			break;
 857		case Opt_nodiscard:
 858			btrfs_clear_and_info(info, DISCARD_SYNC,
 859					     "turning off discard");
 860			btrfs_clear_and_info(info, DISCARD_ASYNC,
 861					     "turning off async discard");
 862			break;
 863		case Opt_space_cache:
 864		case Opt_space_cache_version:
 865			if (token == Opt_space_cache ||
 866			    strcmp(args[0].from, "v1") == 0) {
 867				btrfs_clear_opt(info->mount_opt,
 868						FREE_SPACE_TREE);
 869				btrfs_set_and_info(info, SPACE_CACHE,
 870					   "enabling disk space caching");
 871			} else if (strcmp(args[0].from, "v2") == 0) {
 872				btrfs_clear_opt(info->mount_opt,
 873						SPACE_CACHE);
 874				btrfs_set_and_info(info, FREE_SPACE_TREE,
 875						   "enabling free space tree");
 876			} else {
 877				ret = -EINVAL;
 878				goto out;
 879			}
 880			break;
 881		case Opt_rescan_uuid_tree:
 882			btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
 883			break;
 884		case Opt_no_space_cache:
 885			if (btrfs_test_opt(info, SPACE_CACHE)) {
 886				btrfs_clear_and_info(info, SPACE_CACHE,
 887					     "disabling disk space caching");
 888			}
 889			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
 890				btrfs_clear_and_info(info, FREE_SPACE_TREE,
 891					     "disabling free space tree");
 892			}
 893			break;
 894		case Opt_inode_cache:
 895		case Opt_noinode_cache:
 896			btrfs_warn(info,
 897	"the 'inode_cache' option is deprecated and has no effect since 5.11");
 898			break;
 899		case Opt_clear_cache:
 900			btrfs_set_and_info(info, CLEAR_CACHE,
 901					   "force clearing of disk cache");
 902			break;
 903		case Opt_user_subvol_rm_allowed:
 904			btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
 905			break;
 906		case Opt_enospc_debug:
 907			btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
 908			break;
 909		case Opt_noenospc_debug:
 910			btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
 911			break;
 912		case Opt_defrag:
 913			btrfs_set_and_info(info, AUTO_DEFRAG,
 914					   "enabling auto defrag");
 915			break;
 916		case Opt_nodefrag:
 917			btrfs_clear_and_info(info, AUTO_DEFRAG,
 918					     "disabling auto defrag");
 919			break;
 920		case Opt_recovery:
 
 
 921		case Opt_usebackuproot:
 922			btrfs_warn(info,
 923			"'%s' is deprecated, use 'rescue=usebackuproot' instead",
 924				   token == Opt_recovery ? "recovery" :
 925				   "usebackuproot");
 926			btrfs_info(info,
 927				   "trying to use backup root at mount time");
 928			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 929			break;
 930		case Opt_skip_balance:
 931			btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
 932			break;
 933#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 934		case Opt_check_integrity_including_extent_data:
 935			btrfs_info(info,
 936				   "enabling check integrity including extent data");
 937			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
 
 938			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 939			break;
 940		case Opt_check_integrity:
 941			btrfs_info(info, "enabling check integrity");
 942			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 943			break;
 944		case Opt_check_integrity_print_mask:
 945			ret = match_int(&args[0], &intarg);
 946			if (ret)
 947				goto out;
 948			info->check_integrity_print_mask = intarg;
 949			btrfs_info(info, "check_integrity_print_mask 0x%x",
 950				   info->check_integrity_print_mask);
 951			break;
 952#else
 953		case Opt_check_integrity_including_extent_data:
 954		case Opt_check_integrity:
 955		case Opt_check_integrity_print_mask:
 956			btrfs_err(info,
 957				  "support for check_integrity* not compiled in!");
 958			ret = -EINVAL;
 959			goto out;
 960#endif
 961		case Opt_fatal_errors:
 962			if (strcmp(args[0].from, "panic") == 0)
 963				btrfs_set_opt(info->mount_opt,
 964					      PANIC_ON_FATAL_ERROR);
 965			else if (strcmp(args[0].from, "bug") == 0)
 966				btrfs_clear_opt(info->mount_opt,
 967					      PANIC_ON_FATAL_ERROR);
 968			else {
 969				ret = -EINVAL;
 970				goto out;
 971			}
 972			break;
 973		case Opt_commit_interval:
 974			intarg = 0;
 975			ret = match_int(&args[0], &intarg);
 976			if (ret)
 977				goto out;
 978			if (intarg == 0) {
 979				btrfs_info(info,
 980					   "using default commit interval %us",
 981					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 982				intarg = BTRFS_DEFAULT_COMMIT_INTERVAL;
 983			} else if (intarg > 300) {
 984				btrfs_warn(info, "excessive commit interval %d",
 985					   intarg);
 986			}
 987			info->commit_interval = intarg;
 988			break;
 989		case Opt_rescue:
 990			ret = parse_rescue_options(info, args[0].from);
 991			if (ret < 0)
 992				goto out;
 993			break;
 994#ifdef CONFIG_BTRFS_DEBUG
 995		case Opt_fragment_all:
 996			btrfs_info(info, "fragmenting all space");
 997			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 998			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
 999			break;
1000		case Opt_fragment_metadata:
1001			btrfs_info(info, "fragmenting metadata");
1002			btrfs_set_opt(info->mount_opt,
1003				      FRAGMENT_METADATA);
1004			break;
1005		case Opt_fragment_data:
1006			btrfs_info(info, "fragmenting data");
1007			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
1008			break;
1009#endif
1010#ifdef CONFIG_BTRFS_FS_REF_VERIFY
1011		case Opt_ref_verify:
1012			btrfs_info(info, "doing ref verification");
1013			btrfs_set_opt(info->mount_opt, REF_VERIFY);
1014			break;
1015#endif
1016		case Opt_err:
1017			btrfs_err(info, "unrecognized mount option '%s'", p);
1018			ret = -EINVAL;
1019			goto out;
1020		default:
1021			break;
1022		}
1023	}
1024check:
1025	/* We're read-only, don't have to check. */
1026	if (new_flags & SB_RDONLY)
1027		goto out;
1028
1029	if (check_ro_option(info, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") ||
1030	    check_ro_option(info, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") ||
1031	    check_ro_option(info, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums"))
1032		ret = -EINVAL;
 
1033out:
1034	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
1035	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
1036	    !btrfs_test_opt(info, CLEAR_CACHE)) {
1037		btrfs_err(info, "cannot disable free space tree");
1038		ret = -EINVAL;
1039
1040	}
1041	if (!ret)
1042		ret = btrfs_check_mountopts_zoned(info);
1043	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
1044		btrfs_info(info, "disk space caching is enabled");
1045	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
1046		btrfs_info(info, "using free space tree");
1047	return ret;
1048}
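/*
 * Example option strings handled above (illustrative):
 *
 *   compress-force=zstd:3  - sets COMPRESS and FORCE_COMPRESS, zstd level 3
 *   discard=async          - clears DISCARD_SYNC, sets DISCARD_ASYNC
 *   space_cache=v2         - enables the free space tree
 *   thread_pool=16         - sizes the workqueues, resizable on remount
 */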
1049
1050/*
1051 * Parse mount options that are required early in the mount process.
1052 *
 1053 * All other options will be parsed much later in the mount process and
1054 * only when we need to allocate a new super block.
1055 */
1056static int btrfs_parse_device_options(const char *options, fmode_t flags,
1057				      void *holder)
1058{
1059	substring_t args[MAX_OPT_ARGS];
1060	char *device_name, *opts, *orig, *p;
1061	struct btrfs_device *device = NULL;
1062	int error = 0;
1063
1064	lockdep_assert_held(&uuid_mutex);
1065
1066	if (!options)
1067		return 0;
1068
1069	/*
1070	 * strsep changes the string, duplicate it because btrfs_parse_options
1071	 * gets called later
1072	 */
1073	opts = kstrdup(options, GFP_KERNEL);
1074	if (!opts)
1075		return -ENOMEM;
1076	orig = opts;
1077
1078	while ((p = strsep(&opts, ",")) != NULL) {
1079		int token;
1080
1081		if (!*p)
1082			continue;
1083
1084		token = match_token(p, tokens, args);
1085		if (token == Opt_device) {
1086			device_name = match_strdup(&args[0]);
1087			if (!device_name) {
1088				error = -ENOMEM;
1089				goto out;
1090			}
1091			device = btrfs_scan_one_device(device_name, flags,
1092					holder);
1093			kfree(device_name);
1094			if (IS_ERR(device)) {
1095				error = PTR_ERR(device);
1096				goto out;
1097			}
1098		}
1099	}
1100
1101out:
1102	kfree(orig);
1103	return error;
1104}
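/*
 * Usage sketch (illustrative): members of a multi-device filesystem that
 * have not been scanned yet can be named directly on the command line, e.g.
 *
 *   mount -o device=/dev/sdb,device=/dev/sdc /dev/sdb /mnt
 *
 * Each device= argument is registered here through btrfs_scan_one_device()
 * before the superblock is set up.
 */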
1105
1106/*
1107 * Parse mount options that are related to subvolume id
1108 *
1109 * The value is later passed to mount_subvol()
1110 */
1111static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
1112		u64 *subvol_objectid)
1113{
1114	substring_t args[MAX_OPT_ARGS];
1115	char *opts, *orig, *p;
1116	int error = 0;
1117	u64 subvolid;
1118
1119	if (!options)
1120		return 0;
1121
1122	/*
1123	 * strsep changes the string, duplicate it because
1124	 * btrfs_parse_device_options gets called later
1125	 */
1126	opts = kstrdup(options, GFP_KERNEL);
1127	if (!opts)
1128		return -ENOMEM;
1129	orig = opts;
1130
1131	while ((p = strsep(&opts, ",")) != NULL) {
1132		int token;
1133		if (!*p)
1134			continue;
1135
1136		token = match_token(p, tokens, args);
1137		switch (token) {
1138		case Opt_subvol:
1139			kfree(*subvol_name);
1140			*subvol_name = match_strdup(&args[0]);
1141			if (!*subvol_name) {
1142				error = -ENOMEM;
1143				goto out;
1144			}
1145			break;
1146		case Opt_subvolid:
1147			error = match_u64(&args[0], &subvolid);
1148			if (error)
1149				goto out;
1150
1151			/* we want the original fs_tree */
1152			if (subvolid == 0)
1153				subvolid = BTRFS_FS_TREE_OBJECTID;
1154
1155			*subvol_objectid = subvolid;
1156			break;
1157		default:
1158			break;
1159		}
1160	}
1161
1162out:
1163	kfree(orig);
1164	return error;
1165}
1166
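/*
 * Illustration only: the two options parsed above choose what ends up at the
 * mount point, e.g.
 *
 *   mount -o subvol=/snapshots/daily /dev/sda /mnt
 *   mount -o subvolid=257 /dev/sda /mnt          (257 is a made-up id)
 *
 * subvolid=0 is remapped to BTRFS_FS_TREE_OBJECTID, i.e. the top-level
 * subvolume; the parsed values are consumed later by mount_subvol().
 */
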
1167char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1168					  u64 subvol_objectid)
1169{
1170	struct btrfs_root *root = fs_info->tree_root;
1171	struct btrfs_root *fs_root = NULL;
1172	struct btrfs_root_ref *root_ref;
1173	struct btrfs_inode_ref *inode_ref;
1174	struct btrfs_key key;
1175	struct btrfs_path *path = NULL;
1176	char *name = NULL, *ptr;
1177	u64 dirid;
1178	int len;
1179	int ret;
1180
1181	path = btrfs_alloc_path();
1182	if (!path) {
1183		ret = -ENOMEM;
1184		goto err;
1185	}
1186
1187	name = kmalloc(PATH_MAX, GFP_KERNEL);
1188	if (!name) {
1189		ret = -ENOMEM;
1190		goto err;
1191	}
1192	ptr = name + PATH_MAX - 1;
1193	ptr[0] = '\0';
1194
1195	/*
1196	 * Walk up the subvolume trees in the tree of tree roots by root
1197	 * backrefs until we hit the top-level subvolume.
1198	 */
1199	while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
1200		key.objectid = subvol_objectid;
1201		key.type = BTRFS_ROOT_BACKREF_KEY;
1202		key.offset = (u64)-1;
1203
1204		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1205		if (ret < 0) {
1206			goto err;
1207		} else if (ret > 0) {
1208			ret = btrfs_previous_item(root, path, subvol_objectid,
1209						  BTRFS_ROOT_BACKREF_KEY);
1210			if (ret < 0) {
1211				goto err;
1212			} else if (ret > 0) {
1213				ret = -ENOENT;
1214				goto err;
1215			}
1216		}
1217
1218		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1219		subvol_objectid = key.offset;
1220
1221		root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1222					  struct btrfs_root_ref);
1223		len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
1224		ptr -= len + 1;
1225		if (ptr < name) {
1226			ret = -ENAMETOOLONG;
1227			goto err;
1228		}
1229		read_extent_buffer(path->nodes[0], ptr + 1,
1230				   (unsigned long)(root_ref + 1), len);
1231		ptr[0] = '/';
1232		dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
1233		btrfs_release_path(path);
1234
1235		fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
1236		if (IS_ERR(fs_root)) {
1237			ret = PTR_ERR(fs_root);
1238			fs_root = NULL;
1239			goto err;
1240		}
1241
1242		/*
1243		 * Walk up the filesystem tree by inode refs until we hit the
1244		 * root directory.
1245		 */
1246		while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
1247			key.objectid = dirid;
1248			key.type = BTRFS_INODE_REF_KEY;
1249			key.offset = (u64)-1;
1250
1251			ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1252			if (ret < 0) {
1253				goto err;
1254			} else if (ret > 0) {
1255				ret = btrfs_previous_item(fs_root, path, dirid,
1256							  BTRFS_INODE_REF_KEY);
1257				if (ret < 0) {
1258					goto err;
1259				} else if (ret > 0) {
1260					ret = -ENOENT;
1261					goto err;
1262				}
1263			}
1264
1265			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1266			dirid = key.offset;
1267
1268			inode_ref = btrfs_item_ptr(path->nodes[0],
1269						   path->slots[0],
1270						   struct btrfs_inode_ref);
1271			len = btrfs_inode_ref_name_len(path->nodes[0],
1272						       inode_ref);
1273			ptr -= len + 1;
1274			if (ptr < name) {
1275				ret = -ENAMETOOLONG;
1276				goto err;
1277			}
1278			read_extent_buffer(path->nodes[0], ptr + 1,
1279					   (unsigned long)(inode_ref + 1), len);
1280			ptr[0] = '/';
1281			btrfs_release_path(path);
1282		}
1283		btrfs_put_root(fs_root);
1284		fs_root = NULL;
1285	}
1286
1287	btrfs_free_path(path);
1288	if (ptr == name + PATH_MAX - 1) {
1289		name[0] = '/';
1290		name[1] = '\0';
1291	} else {
1292		memmove(name, ptr, name + PATH_MAX - ptr);
1293	}
1294	return name;
1295
1296err:
1297	btrfs_put_root(fs_root);
1298	btrfs_free_path(path);
1299	kfree(name);
1300	return ERR_PTR(ret);
1301}
1302
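/*
 * Worked example for the function above (illustrative names): for a
 * subvolume reachable as /snapshots/daily, the root backref yields "daily",
 * the inode refs of the parent subvolume then yield "snapshots", and the
 * PATH_MAX buffer is filled from the end:
 *
 *   [ ..................../snapshots/daily\0 ]
 *                         ^ptr
 *
 * The final memmove() shifts the assembled string to the start of "name".
 */
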
1303static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
1304{
1305	struct btrfs_root *root = fs_info->tree_root;
1306	struct btrfs_dir_item *di;
1307	struct btrfs_path *path;
1308	struct btrfs_key location;
1309	u64 dir_id;
1310
1311	path = btrfs_alloc_path();
1312	if (!path)
1313		return -ENOMEM;
1314
1315	/*
1316	 * Find the "default" dir item which points to the root item that we
1317	 * will mount by default if we haven't been given a specific subvolume
1318	 * to mount.
1319	 */
1320	dir_id = btrfs_super_root_dir(fs_info->super_copy);
1321	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
1322	if (IS_ERR(di)) {
1323		btrfs_free_path(path);
1324		return PTR_ERR(di);
1325	}
1326	if (!di) {
1327		/*
1328		 * Ok the default dir item isn't there.  This is weird since
1329		 * it's always been there, but don't freak out, just try and
1330		 * mount the top-level subvolume.
1331		 */
1332		btrfs_free_path(path);
1333		*objectid = BTRFS_FS_TREE_OBJECTID;
1334		return 0;
1335	}
1336
1337	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1338	btrfs_free_path(path);
1339	*objectid = location.objectid;
1340	return 0;
1341}
1342
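/*
 * Sketch of how the "default" dir item looked up above is normally created
 * (assumes the btrfs-progs interface; not defined in this file):
 *
 *   btrfs subvolume create /mnt/rootvol        # gets some subvolume id
 *   btrfs subvolume set-default <id> /mnt      # rewrites the "default" item
 *
 * After that, mounting without subvol=/subvolid= resolves to that subvolume
 * through get_default_subvol_objectid().
 */
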
1343static int btrfs_fill_super(struct super_block *sb,
1344			    struct btrfs_fs_devices *fs_devices,
1345			    void *data)
1346{
1347	struct inode *inode;
1348	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1349	int err;
1350
1351	sb->s_maxbytes = MAX_LFS_FILESIZE;
1352	sb->s_magic = BTRFS_SUPER_MAGIC;
1353	sb->s_op = &btrfs_super_ops;
1354	sb->s_d_op = &btrfs_dentry_operations;
1355	sb->s_export_op = &btrfs_export_ops;
1356	sb->s_xattr = btrfs_xattr_handlers;
1357	sb->s_time_gran = 1;
1358#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1359	sb->s_flags |= SB_POSIXACL;
1360#endif
1361	sb->s_flags |= SB_I_VERSION;
1362	sb->s_iflags |= SB_I_CGROUPWB;
1363
1364	err = super_setup_bdi(sb);
1365	if (err) {
1366		btrfs_err(fs_info, "super_setup_bdi failed");
1367		return err;
1368	}
1369
1370	err = open_ctree(sb, fs_devices, (char *)data);
1371	if (err) {
1372		btrfs_err(fs_info, "open_ctree failed");
1373		return err;
1374	}
1375
1376	inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
1377	if (IS_ERR(inode)) {
1378		err = PTR_ERR(inode);
1379		goto fail_close;
1380	}
1381
1382	sb->s_root = d_make_root(inode);
1383	if (!sb->s_root) {
1384		err = -ENOMEM;
1385		goto fail_close;
1386	}
1387
1388	cleancache_init_fs(sb);
1389	sb->s_flags |= SB_ACTIVE;
1390	return 0;
1391
1392fail_close:
1393	close_ctree(fs_info);
1394	return err;
1395}
1396
1397int btrfs_sync_fs(struct super_block *sb, int wait)
1398{
1399	struct btrfs_trans_handle *trans;
1400	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1401	struct btrfs_root *root = fs_info->tree_root;
1402
1403	trace_btrfs_sync_fs(fs_info, wait);
1404
1405	if (!wait) {
1406		filemap_flush(fs_info->btree_inode->i_mapping);
1407		return 0;
1408	}
1409
1410	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1411
1412	trans = btrfs_attach_transaction_barrier(root);
1413	if (IS_ERR(trans)) {
1414		/* no transaction, don't bother */
1415		if (PTR_ERR(trans) == -ENOENT) {
1416			/*
1417			 * Exit unless we have some pending changes
1418			 * that need to go through commit
1419			 */
1420			if (fs_info->pending_changes == 0)
1421				return 0;
1422			/*
1423			 * A non-blocking test if the fs is frozen. We must not
1424			 * start a new transaction here otherwise a deadlock
1425			 * happens. The pending operations are delayed to the
1426			 * next commit after thawing.
1427			 */
1428			if (sb_start_write_trylock(sb))
1429				sb_end_write(sb);
1430			else
1431				return 0;
1432			trans = btrfs_start_transaction(root, 0);
1433		}
1434		if (IS_ERR(trans))
1435			return PTR_ERR(trans);
1436	}
1437	return btrfs_commit_transaction(trans);
1438}
1439
1440static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed)
1441{
1442	seq_printf(seq, "%s%s", (*printed) ? ":" : ",rescue=", s);
1443	*printed = true;
1444}
1445
1446static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1447{
1448	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1449	const char *compress_type;
1450	const char *subvol_name;
1451	bool printed = false;
1452
1453	if (btrfs_test_opt(info, DEGRADED))
1454		seq_puts(seq, ",degraded");
1455	if (btrfs_test_opt(info, NODATASUM))
1456		seq_puts(seq, ",nodatasum");
1457	if (btrfs_test_opt(info, NODATACOW))
1458		seq_puts(seq, ",nodatacow");
1459	if (btrfs_test_opt(info, NOBARRIER))
1460		seq_puts(seq, ",nobarrier");
1461	if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
1462		seq_printf(seq, ",max_inline=%llu", info->max_inline);
1463	if (info->thread_pool_size !=  min_t(unsigned long,
1464					     num_online_cpus() + 2, 8))
1465		seq_printf(seq, ",thread_pool=%u", info->thread_pool_size);
1466	if (btrfs_test_opt(info, COMPRESS)) {
1467		compress_type = btrfs_compress_type2str(info->compress_type);
1468		if (btrfs_test_opt(info, FORCE_COMPRESS))
1469			seq_printf(seq, ",compress-force=%s", compress_type);
1470		else
1471			seq_printf(seq, ",compress=%s", compress_type);
1472		if (info->compress_level)
1473			seq_printf(seq, ":%d", info->compress_level);
1474	}
1475	if (btrfs_test_opt(info, NOSSD))
1476		seq_puts(seq, ",nossd");
1477	if (btrfs_test_opt(info, SSD_SPREAD))
1478		seq_puts(seq, ",ssd_spread");
1479	else if (btrfs_test_opt(info, SSD))
1480		seq_puts(seq, ",ssd");
1481	if (btrfs_test_opt(info, NOTREELOG))
1482		seq_puts(seq, ",notreelog");
1483	if (btrfs_test_opt(info, NOLOGREPLAY))
1484		print_rescue_option(seq, "nologreplay", &printed);
1485	if (btrfs_test_opt(info, USEBACKUPROOT))
1486		print_rescue_option(seq, "usebackuproot", &printed);
1487	if (btrfs_test_opt(info, IGNOREBADROOTS))
1488		print_rescue_option(seq, "ignorebadroots", &printed);
1489	if (btrfs_test_opt(info, IGNOREDATACSUMS))
1490		print_rescue_option(seq, "ignoredatacsums", &printed);
1491	if (btrfs_test_opt(info, FLUSHONCOMMIT))
1492		seq_puts(seq, ",flushoncommit");
1493	if (btrfs_test_opt(info, DISCARD_SYNC))
1494		seq_puts(seq, ",discard");
1495	if (btrfs_test_opt(info, DISCARD_ASYNC))
1496		seq_puts(seq, ",discard=async");
1497	if (!(info->sb->s_flags & SB_POSIXACL))
1498		seq_puts(seq, ",noacl");
1499	if (btrfs_free_space_cache_v1_active(info))
1500		seq_puts(seq, ",space_cache");
1501	else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
1502		seq_puts(seq, ",space_cache=v2");
1503	else
1504		seq_puts(seq, ",nospace_cache");
1505	if (btrfs_test_opt(info, RESCAN_UUID_TREE))
1506		seq_puts(seq, ",rescan_uuid_tree");
1507	if (btrfs_test_opt(info, CLEAR_CACHE))
1508		seq_puts(seq, ",clear_cache");
1509	if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
1510		seq_puts(seq, ",user_subvol_rm_allowed");
1511	if (btrfs_test_opt(info, ENOSPC_DEBUG))
1512		seq_puts(seq, ",enospc_debug");
1513	if (btrfs_test_opt(info, AUTO_DEFRAG))
1514		seq_puts(seq, ",autodefrag");
1515	if (btrfs_test_opt(info, SKIP_BALANCE))
1516		seq_puts(seq, ",skip_balance");
1517#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1518	if (btrfs_test_opt(info, CHECK_INTEGRITY_DATA))
1519		seq_puts(seq, ",check_int_data");
1520	else if (btrfs_test_opt(info, CHECK_INTEGRITY))
1521		seq_puts(seq, ",check_int");
1522	if (info->check_integrity_print_mask)
1523		seq_printf(seq, ",check_int_print_mask=%d",
1524				info->check_integrity_print_mask);
1525#endif
1526	if (info->metadata_ratio)
1527		seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
1528	if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
1529		seq_puts(seq, ",fatal_errors=panic");
1530	if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
1531		seq_printf(seq, ",commit=%u", info->commit_interval);
1532#ifdef CONFIG_BTRFS_DEBUG
1533	if (btrfs_test_opt(info, FRAGMENT_DATA))
1534		seq_puts(seq, ",fragment=data");
1535	if (btrfs_test_opt(info, FRAGMENT_METADATA))
1536		seq_puts(seq, ",fragment=metadata");
1537#endif
1538	if (btrfs_test_opt(info, REF_VERIFY))
1539		seq_puts(seq, ",ref_verify");
1540	seq_printf(seq, ",subvolid=%llu",
1541		  BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1542	subvol_name = btrfs_get_subvol_name_from_objectid(info,
1543			BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1544	if (!IS_ERR(subvol_name)) {
1545		seq_puts(seq, ",subvol=");
1546		seq_escape(seq, subvol_name, " \t\n\\");
1547		kfree(subvol_name);
1548	}
1549	return 0;
1550}
1551
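/*
 * For illustration, a /proc/mounts line produced with the help of
 * btrfs_show_options() might read (values made up):
 *
 *   /dev/sda /mnt btrfs rw,relatime,compress=zstd:3,space_cache=v2,subvolid=5,subvol=/ 0 0
 *
 * Only non-default options are emitted, and the rescue= sub-options are
 * grouped together by print_rescue_option().
 */
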
1552static int btrfs_test_super(struct super_block *s, void *data)
1553{
1554	struct btrfs_fs_info *p = data;
1555	struct btrfs_fs_info *fs_info = btrfs_sb(s);
1556
1557	return fs_info->fs_devices == p->fs_devices;
1558}
1559
1560static int btrfs_set_super(struct super_block *s, void *data)
1561{
1562	int err = set_anon_super(s, data);
1563	if (!err)
1564		s->s_fs_info = data;
1565	return err;
1566}
1567
1568/*
1569 * subvolumes are identified by ino 256
1570 */
1571static inline int is_subvolume_inode(struct inode *inode)
1572{
1573	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
1574		return 1;
1575	return 0;
1576}
1577
1578static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1579				   struct vfsmount *mnt)
1580{
1581	struct dentry *root;
1582	int ret;
1583
1584	if (!subvol_name) {
1585		if (!subvol_objectid) {
1586			ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
1587							  &subvol_objectid);
1588			if (ret) {
1589				root = ERR_PTR(ret);
1590				goto out;
1591			}
1592		}
1593		subvol_name = btrfs_get_subvol_name_from_objectid(
1594					btrfs_sb(mnt->mnt_sb), subvol_objectid);
1595		if (IS_ERR(subvol_name)) {
1596			root = ERR_CAST(subvol_name);
1597			subvol_name = NULL;
1598			goto out;
1599		}
1600
1601	}
1602
1603	root = mount_subtree(mnt, subvol_name);
1604	/* mount_subtree() drops our reference on the vfsmount. */
1605	mnt = NULL;
1606
1607	if (!IS_ERR(root)) {
1608		struct super_block *s = root->d_sb;
1609		struct btrfs_fs_info *fs_info = btrfs_sb(s);
1610		struct inode *root_inode = d_inode(root);
1611		u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
1612
1613		ret = 0;
1614		if (!is_subvolume_inode(root_inode)) {
1615			btrfs_err(fs_info, "'%s' is not a valid subvolume",
1616			       subvol_name);
1617			ret = -EINVAL;
1618		}
1619		if (subvol_objectid && root_objectid != subvol_objectid) {
1620			/*
1621			 * This will also catch a race condition where a
1622			 * subvolume which was passed by ID is renamed and
1623			 * another subvolume is renamed over the old location.
1624			 */
1625			btrfs_err(fs_info,
1626				  "subvol '%s' does not match subvolid %llu",
1627				  subvol_name, subvol_objectid);
1628			ret = -EINVAL;
1629		}
1630		if (ret) {
1631			dput(root);
1632			root = ERR_PTR(ret);
1633			deactivate_locked_super(s);
1634		}
1635	}
1636
1637out:
1638	mntput(mnt);
1639	kfree(subvol_name);
1640	return root;
1641}
1642
1643/*
1644 * Find a superblock for the given device / mount point.
1645 *
1646 * Note: This is based on mount_bdev from fs/super.c with a few additions
1647 *       for multiple device setup.  Make sure to keep it in sync.
1648 */
1649static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
1650		int flags, const char *device_name, void *data)
1651{
1652	struct block_device *bdev = NULL;
1653	struct super_block *s;
1654	struct btrfs_device *device = NULL;
1655	struct btrfs_fs_devices *fs_devices = NULL;
1656	struct btrfs_fs_info *fs_info = NULL;
1657	void *new_sec_opts = NULL;
1658	fmode_t mode = FMODE_READ;
1659	int error = 0;
1660
1661	if (!(flags & SB_RDONLY))
1662		mode |= FMODE_WRITE;
1663
1664	if (data) {
1665		error = security_sb_eat_lsm_opts(data, &new_sec_opts);
1666		if (error)
1667			return ERR_PTR(error);
1668	}
1669
1670	/*
1671	 * Setup a dummy root and fs_info for test/set super.  This is because
1672	 * we don't actually fill this stuff out until open_ctree, but we need it
1673	 * for the test/set super callbacks; open_ctree will properly initialize
1674	 * the file system specific settings later.  btrfs_init_fs_info initializes
1675	 * the static elements of the fs_info (locks and such) to make cleanup
1676	 * easier if we find a superblock with our given fs_devices later on at sget() time.
1677	 */
1678	fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL);
1679	if (!fs_info) {
1680		error = -ENOMEM;
1681		goto error_sec_opts;
1682	}
1683	btrfs_init_fs_info(fs_info);
1684
1685	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1686	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
1687	if (!fs_info->super_copy || !fs_info->super_for_commit) {
1688		error = -ENOMEM;
1689		goto error_fs_info;
1690	}
1691
1692	mutex_lock(&uuid_mutex);
1693	error = btrfs_parse_device_options(data, mode, fs_type);
1694	if (error) {
1695		mutex_unlock(&uuid_mutex);
1696		goto error_fs_info;
1697	}
1698
1699	device = btrfs_scan_one_device(device_name, mode, fs_type);
1700	if (IS_ERR(device)) {
1701		mutex_unlock(&uuid_mutex);
1702		error = PTR_ERR(device);
1703		goto error_fs_info;
1704	}
1705
1706	fs_devices = device->fs_devices;
1707	fs_info->fs_devices = fs_devices;
1708
1709	error = btrfs_open_devices(fs_devices, mode, fs_type);
1710	mutex_unlock(&uuid_mutex);
1711	if (error)
1712		goto error_fs_info;
1713
1714	if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
1715		error = -EACCES;
1716		goto error_close_devices;
1717	}
1718
1719	bdev = fs_devices->latest_bdev;
1720	s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
1721		 fs_info);
1722	if (IS_ERR(s)) {
1723		error = PTR_ERR(s);
1724		goto error_close_devices;
1725	}
1726
1727	if (s->s_root) {
1728		btrfs_close_devices(fs_devices);
1729		btrfs_free_fs_info(fs_info);
1730		if ((flags ^ s->s_flags) & SB_RDONLY)
1731			error = -EBUSY;
1732	} else {
1733		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1734		btrfs_sb(s)->bdev_holder = fs_type;
1735		if (!strstr(crc32c_impl(), "generic"))
1736			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
1737		error = btrfs_fill_super(s, fs_devices, data);
1738	}
1739	if (!error)
1740		error = security_sb_set_mnt_opts(s, new_sec_opts, 0, NULL);
1741	security_free_mnt_opts(&new_sec_opts);
1742	if (error) {
1743		deactivate_locked_super(s);
1744		return ERR_PTR(error);
1745	}
1746
1747	return dget(s->s_root);
1748
1749error_close_devices:
1750	btrfs_close_devices(fs_devices);
1751error_fs_info:
1752	btrfs_free_fs_info(fs_info);
1753error_sec_opts:
1754	security_free_mnt_opts(&new_sec_opts);
1755	return ERR_PTR(error);
1756}
1757
1758/*
1759 * Mount function which is called by VFS layer.
1760 *
1761 * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
1762 * which needs the vfsmount* of the device's root (/).  This means the device's
1763 * root has to be mounted internally in any case.
1764 *
1765 * Operation flow:
1766 *   1. Parse subvol id related options for later use in mount_subvol().
1767 *
1768 *   2. Mount device's root (/) by calling vfs_kern_mount().
1769 *
1770 *      NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
1771 *      first place. In order to avoid calling btrfs_mount() again, we use
1772 *      different file_system_type which is not registered to VFS by
1773 *      register_filesystem() (btrfs_root_fs_type). As a result,
1774 *      btrfs_mount_root() is called. The return value will be used by
1775 *      mount_subtree() in mount_subvol().
1776 *
1777 *   3. Call mount_subvol() to get the dentry of the subvolume. Since there is
1778 *      "btrfs subvolume set-default", mount_subvol() is always called.
1779 */
1780static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1781		const char *device_name, void *data)
1782{
1783	struct vfsmount *mnt_root;
1784	struct dentry *root;
1785	char *subvol_name = NULL;
1786	u64 subvol_objectid = 0;
1787	int error = 0;
1788
1789	error = btrfs_parse_subvol_options(data, &subvol_name,
1790					&subvol_objectid);
1791	if (error) {
1792		kfree(subvol_name);
1793		return ERR_PTR(error);
1794	}
1795
1796	/* mount device's root (/) */
1797	mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
1798	if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
1799		if (flags & SB_RDONLY) {
1800			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1801				flags & ~SB_RDONLY, device_name, data);
1802		} else {
1803			mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
1804				flags | SB_RDONLY, device_name, data);
1805			if (IS_ERR(mnt_root)) {
1806				root = ERR_CAST(mnt_root);
1807				kfree(subvol_name);
1808				goto out;
1809			}
1810
1811			down_write(&mnt_root->mnt_sb->s_umount);
1812			error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
1813			up_write(&mnt_root->mnt_sb->s_umount);
1814			if (error < 0) {
1815				root = ERR_PTR(error);
1816				mntput(mnt_root);
1817				kfree(subvol_name);
1818				goto out;
1819			}
1820		}
1821	}
1822	if (IS_ERR(mnt_root)) {
1823		root = ERR_CAST(mnt_root);
1824		kfree(subvol_name);
1825		goto out;
1826	}
1827
1828	/* mount_subvol() will free subvol_name and mnt_root */
1829	root = mount_subvol(subvol_name, subvol_objectid, mnt_root);
1830
1831out:
1832	return root;
1833}
1834
1835static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1836				     u32 new_pool_size, u32 old_pool_size)
1837{
1838	if (new_pool_size == old_pool_size)
1839		return;
1840
1841	fs_info->thread_pool_size = new_pool_size;
1842
1843	btrfs_info(fs_info, "resize thread pool %d -> %d",
1844	       old_pool_size, new_pool_size);
1845
1846	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
1847	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
1848	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
1849	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
1850	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
1851	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
1852				new_pool_size);
1853	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
1854	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
1855	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
1856	btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size);
1857	btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers,
1858				new_pool_size);
1859}
1860
1861static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
1862				       unsigned long old_opts, int flags)
1863{
1864	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1865	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
1866	     (flags & SB_RDONLY))) {
1867		/* wait for any defraggers to finish */
1868		wait_event(fs_info->transaction_wait,
1869			   (atomic_read(&fs_info->defrag_running) == 0));
1870		if (flags & SB_RDONLY)
1871			sync_filesystem(fs_info->sb);
1872	}
1873}
1874
1875static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
1876					 unsigned long old_opts)
1877{
1878	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
1879
1880	/*
1881	 * We need to clean up all defraggable inodes if autodefrag is turned
1882	 * off or the filesystem is read-only.
1883	 */
1884	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
1885	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
1886		btrfs_cleanup_defrag_inodes(fs_info);
1887	}
1888
1889	/* If we toggled discard async */
1890	if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1891	    btrfs_test_opt(fs_info, DISCARD_ASYNC))
1892		btrfs_discard_resume(fs_info);
1893	else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) &&
1894		 !btrfs_test_opt(fs_info, DISCARD_ASYNC))
1895		btrfs_discard_cleanup(fs_info);
1896
1897	/* If we toggled space cache */
1898	if (cache_opt != btrfs_free_space_cache_v1_active(fs_info))
1899		btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
1900}
1901
1902static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1903{
1904	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1905	unsigned old_flags = sb->s_flags;
1906	unsigned long old_opts = fs_info->mount_opt;
1907	unsigned long old_compress_type = fs_info->compress_type;
1908	u64 old_max_inline = fs_info->max_inline;
1909	u32 old_thread_pool_size = fs_info->thread_pool_size;
1910	u32 old_metadata_ratio = fs_info->metadata_ratio;
1911	int ret;
1912
1913	sync_filesystem(sb);
1914	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1915
1916	if (data) {
1917		void *new_sec_opts = NULL;
1918
1919		ret = security_sb_eat_lsm_opts(data, &new_sec_opts);
1920		if (!ret)
1921			ret = security_sb_remount(sb, new_sec_opts);
1922		security_free_mnt_opts(&new_sec_opts);
1923		if (ret)
1924			goto restore;
1925	}
1926
1927	ret = btrfs_parse_options(fs_info, data, *flags);
1928	if (ret)
1929		goto restore;
1930
1931	btrfs_remount_begin(fs_info, old_opts, *flags);
1932	btrfs_resize_thread_pool(fs_info,
1933		fs_info->thread_pool_size, old_thread_pool_size);
1934
1935	if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
1936	    (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
1937	    (!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
1938		btrfs_warn(fs_info,
1939		"remount supports changing free space tree only from ro to rw");
1940		/* Make sure free space cache options match the state on disk */
1941		if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
1942			btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
1943			btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
1944		}
1945		if (btrfs_free_space_cache_v1_active(fs_info)) {
1946			btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE);
1947			btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE);
1948		}
1949	}
1950
1951	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1952		goto out;
1953
1954	if (*flags & SB_RDONLY) {
1955		/*
1956		 * this also happens on 'umount -rf' or on shutdown, when
1957		 * the filesystem is busy.
1958		 */
1959		cancel_work_sync(&fs_info->async_reclaim_work);
1960		cancel_work_sync(&fs_info->async_data_reclaim_work);
1961
1962		btrfs_discard_cleanup(fs_info);
1963
1964		/* wait for the uuid_scan task to finish */
1965		down(&fs_info->uuid_tree_rescan_sem);
1966		/* avoid complaints from lockdep et al. */
1967		up(&fs_info->uuid_tree_rescan_sem);
1968
1969		btrfs_set_sb_rdonly(sb);
1970
1971		/*
1972		 * Setting SB_RDONLY will put the cleaner thread to
1973		 * sleep at the next loop if it's already active.
1974		 * If it's already asleep, we'll leave unused block
1975		 * groups on disk until we're mounted read-write again
1976		 * unless we clean them up here.
1977		 */
1978		btrfs_delete_unused_bgs(fs_info);
1979
1980		/*
1981		 * The cleaner task could be already running before we set the
1982		 * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
1983		 * We must make sure that after we finish the remount, i.e. after
1984		 * we call btrfs_commit_super(), the cleaner can no longer start
1985		 * a transaction - either because it was dropping a dead root,
1986		 * running delayed iputs or deleting an unused block group (the
1987		 * cleaner picked a block group from the list of unused block
1988		 * groups before we were able to in the previous call to
1989		 * btrfs_delete_unused_bgs()).
1990		 */
1991		wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
1992			    TASK_UNINTERRUPTIBLE);
1993
1994		/*
1995		 * We've set the superblock to RO mode, so we might have made
1996		 * the cleaner task sleep without running all pending delayed
1997		 * iputs. Go through all the delayed iputs here, so that if an
1998		 * unmount happens without remounting RW we don't end up at
1999		 * finishing close_ctree() with a non-empty list of delayed
2000		 * iputs.
2001		 */
2002		btrfs_run_delayed_iputs(fs_info);
2003
2004		btrfs_dev_replace_suspend_for_unmount(fs_info);
2005		btrfs_scrub_cancel(fs_info);
2006		btrfs_pause_balance(fs_info);
2007
2008		/*
2009		 * Pause the qgroup rescan worker if it is running. We don't want
2010		 * it to be still running after we are in RO mode, as after that,
2011		 * by the time we unmount, it might have left a transaction open,
2012		 * so we would leak the transaction and/or crash.
2013		 */
2014		btrfs_qgroup_wait_for_completion(fs_info, false);
2015
2016		ret = btrfs_commit_super(fs_info);
2017		if (ret)
2018			goto restore;
2019	} else {
2020		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2021			btrfs_err(fs_info,
2022				"Remounting read-write after error is not allowed");
2023			ret = -EINVAL;
2024			goto restore;
2025		}
2026		if (fs_info->fs_devices->rw_devices == 0) {
2027			ret = -EACCES;
2028			goto restore;
2029		}
2030
2031		if (!btrfs_check_rw_degradable(fs_info, NULL)) {
2032			btrfs_warn(fs_info,
2033		"too many missing devices, writable remount is not allowed");
2034			ret = -EACCES;
2035			goto restore;
2036		}
2037
2038		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
2039			btrfs_warn(fs_info,
2040		"mount required to replay tree-log, cannot remount read-write");
2041			ret = -EINVAL;
2042			goto restore;
2043		}
2044		if (fs_info->sectorsize < PAGE_SIZE) {
2045			btrfs_warn(fs_info,
2046	"read-write mount is not yet allowed for sectorsize %u page size %lu",
2047				   fs_info->sectorsize, PAGE_SIZE);
2048			ret = -EINVAL;
2049			goto restore;
2050		}
2051
2052		/*
2053		 * NOTE: when remounting with a change that does writes, don't
2054		 * put it anywhere above this point, as we are not sure to be
2055		 * safe to write until we pass the above checks.
2056		 */
2057		ret = btrfs_start_pre_rw_mount(fs_info);
2058		if (ret)
2059			goto restore;
2060
2061		btrfs_clear_sb_rdonly(sb);
2062
2063		set_bit(BTRFS_FS_OPEN, &fs_info->flags);
2064	}
2065out:
2066	/*
2067	 * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS,
2068	 * since the absence of the flag means it can be toggled off by remount.
2069	 */
2070	*flags |= SB_I_VERSION;
2071
2072	wake_up_process(fs_info->transaction_kthread);
2073	btrfs_remount_cleanup(fs_info, old_opts);
2074	btrfs_clear_oneshot_options(fs_info);
2075	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
2076
2077	return 0;
2078
2079restore:
2080	/* We've hit an error - don't reset SB_RDONLY */
2081	if (sb_rdonly(sb))
2082		old_flags |= SB_RDONLY;
2083	if (!(old_flags & SB_RDONLY))
2084		clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2085	sb->s_flags = old_flags;
2086	fs_info->mount_opt = old_opts;
2087	fs_info->compress_type = old_compress_type;
2088	fs_info->max_inline = old_max_inline;
2089	btrfs_resize_thread_pool(fs_info,
2090		old_thread_pool_size, fs_info->thread_pool_size);
2091	fs_info->metadata_ratio = old_metadata_ratio;
2092	btrfs_remount_cleanup(fs_info, old_opts);
2093	clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
2094
2095	return ret;
2096}
2097
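/*
 * Illustration only: the remount path above is typically entered via
 *
 *   mount -o remount,ro /mnt
 *   mount -o remount,rw,compress=zstd /mnt
 *
 * i.e. ro<->rw transitions and mount option changes, which btrfs_remount()
 * validates and applies step by step.
 */
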
2098/* Used to sort the devices by max_avail(descending sort) */
2099static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
2100				       const void *dev_info2)
2101{
2102	if (((struct btrfs_device_info *)dev_info1)->max_avail >
2103	    ((struct btrfs_device_info *)dev_info2)->max_avail)
2104		return -1;
2105	else if (((struct btrfs_device_info *)dev_info1)->max_avail <
2106		 ((struct btrfs_device_info *)dev_info2)->max_avail)
2107		return 1;
2108	else
2109		return 0;
2110}
2111
2112/*
2113 * Sort the devices by max_avail, which stores the maximum free extent size
2114 * of each device (descending sort).
2115 */
2116static inline void btrfs_descending_sort_devices(
2117					struct btrfs_device_info *devices,
2118					size_t nr_devices)
2119{
2120	sort(devices, nr_devices, sizeof(struct btrfs_device_info),
2121	     btrfs_cmp_device_free_bytes, NULL);
2122}
2123
2124/*
2125 * Helper to calculate the free space on the devices that can be used to
2126 * store file data.
2127 */
2128static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
2129					      u64 *free_bytes)
2130{
2131	struct btrfs_device_info *devices_info;
2132	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2133	struct btrfs_device *device;
2134	u64 type;
2135	u64 avail_space;
2136	u64 min_stripe_size;
2137	int num_stripes = 1;
2138	int i = 0, nr_devices;
2139	const struct btrfs_raid_attr *rattr;
2140
2141	/*
2142	 * We aren't under the device list lock, so this is racy-ish, but good
2143	 * enough for our purposes.
2144	 */
2145	nr_devices = fs_info->fs_devices->open_devices;
2146	if (!nr_devices) {
2147		smp_mb();
2148		nr_devices = fs_info->fs_devices->open_devices;
2149		ASSERT(nr_devices);
2150		if (!nr_devices) {
2151			*free_bytes = 0;
2152			return 0;
2153		}
2154	}
2155
2156	devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
2157			       GFP_KERNEL);
2158	if (!devices_info)
2159		return -ENOMEM;
2160
2161	/* calc min stripe number for data space allocation */
2162	type = btrfs_data_alloc_profile(fs_info);
2163	rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)];
2164
2165	if (type & BTRFS_BLOCK_GROUP_RAID0)
2166		num_stripes = nr_devices;
2167	else if (type & BTRFS_BLOCK_GROUP_RAID1)
2168		num_stripes = 2;
2169	else if (type & BTRFS_BLOCK_GROUP_RAID1C3)
2170		num_stripes = 3;
2171	else if (type & BTRFS_BLOCK_GROUP_RAID1C4)
2172		num_stripes = 4;
2173	else if (type & BTRFS_BLOCK_GROUP_RAID10)
2174		num_stripes = 4;
2175
2176	/* Adjust for more than 1 stripe per device */
2177	min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN;
2178
2179	rcu_read_lock();
2180	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2181		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2182						&device->dev_state) ||
2183		    !device->bdev ||
2184		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2185			continue;
2186
2187		if (i >= nr_devices)
2188			break;
2189
2190		avail_space = device->total_bytes - device->bytes_used;
2191
2192		/* align with stripe_len */
2193		avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN);
2194
2195		/*
2196		 * In order to avoid overwriting the superblock on the drive,
2197		 * btrfs starts at an offset of at least 1MB when doing chunk
2198		 * allocation.
2199		 *
2200		 * This ensures we have at least min_stripe_size free space
2201		 * after excluding 1MB.
2202		 */
2203		if (avail_space <= SZ_1M + min_stripe_size)
2204			continue;
2205
2206		avail_space -= SZ_1M;
2207
2208		devices_info[i].dev = device;
2209		devices_info[i].max_avail = avail_space;
2210
2211		i++;
2212	}
2213	rcu_read_unlock();
2214
2215	nr_devices = i;
2216
2217	btrfs_descending_sort_devices(devices_info, nr_devices);
2218
2219	i = nr_devices - 1;
2220	avail_space = 0;
2221	while (nr_devices >= rattr->devs_min) {
2222		num_stripes = min(num_stripes, nr_devices);
2223
2224		if (devices_info[i].max_avail >= min_stripe_size) {
2225			int j;
2226			u64 alloc_size;
2227
2228			avail_space += devices_info[i].max_avail * num_stripes;
2229			alloc_size = devices_info[i].max_avail;
2230			for (j = i + 1 - num_stripes; j <= i; j++)
2231				devices_info[j].max_avail -= alloc_size;
2232		}
2233		i--;
2234		nr_devices--;
2235	}
2236
2237	kfree(devices_info);
2238	*free_bytes = avail_space;
2239	return 0;
2240}
2241
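/*
 * Worked example for btrfs_calc_avail_data_space() above (illustrative
 * sizes): two devices with 100G and 50G unallocated and a RAID1 data profile
 * (num_stripes = 2, devs_min = 2).  After the descending sort the loop starts
 * at the smaller device, can still place a stripe on both, and accumulates
 * 2 * 50G = 100G of raw space before running out of devices.  btrfs_statfs()
 * later divides by the RAID factor (2), so roughly 50G is reported available
 * for data.
 */
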
2242/*
2243 * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
2244 *
2245 * If there's a redundant raid level at DATA block groups, use the respective
2246 * multiplier to scale the sizes.
2247 *
2248 * Unused device space usage is based on simulating the chunk allocator
2249 * algorithm that respects the device sizes and order of allocations.  This is
2250 * a close approximation of the actual use but there are other factors that may
2251 * change the result (like a new metadata chunk).
2252 *
2253 * If metadata is exhausted, f_bavail will be 0.
2254 */
2255static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2256{
2257	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
2258	struct btrfs_super_block *disk_super = fs_info->super_copy;
2259	struct btrfs_space_info *found;
2260	u64 total_used = 0;
2261	u64 total_free_data = 0;
2262	u64 total_free_meta = 0;
2263	u32 bits = fs_info->sectorsize_bits;
2264	__be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
2265	unsigned factor = 1;
2266	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
2267	int ret;
2268	u64 thresh = 0;
2269	int mixed = 0;
2270
2271	list_for_each_entry(found, &fs_info->space_info, list) {
2272		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
2273			int i;
2274
2275			total_free_data += found->disk_total - found->disk_used;
2276			total_free_data -=
2277				btrfs_account_ro_block_groups_free_space(found);
2278
2279			for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2280				if (!list_empty(&found->block_groups[i]))
2281					factor = btrfs_bg_type_to_factor(
2282						btrfs_raid_array[i].bg_flag);
2283			}
2284		}
2285
2286		/*
2287		 * Metadata in mixed block group profiles is accounted in data
2288		 */
2289		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
2290			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
2291				mixed = 1;
2292			else
2293				total_free_meta += found->disk_total -
2294					found->disk_used;
2295		}
2296
2297		total_used += found->disk_used;
2298	}
2299
2300	buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
2301	buf->f_blocks >>= bits;
2302	buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
2303
2304	/* Account global block reserve as used, it's in logical size already */
2305	spin_lock(&block_rsv->lock);
2306	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
2307	if (buf->f_bfree >= block_rsv->size >> bits)
2308		buf->f_bfree -= block_rsv->size >> bits;
2309	else
2310		buf->f_bfree = 0;
2311	spin_unlock(&block_rsv->lock);
2312
2313	buf->f_bavail = div_u64(total_free_data, factor);
2314	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
2315	if (ret)
2316		return ret;
2317	buf->f_bavail += div_u64(total_free_data, factor);
2318	buf->f_bavail = buf->f_bavail >> bits;
2319
2320	/*
2321	 * We calculate the remaining metadata space minus global reserve. If
2322	 * this is (supposedly) smaller than zero, there's no space. But this
2323	 * does not hold in practice; the exhausted state happens when there is still
2324	 * some positive delta. So we apply some guesswork and compare the
2325	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
2326	 *
2327	 * We probably cannot calculate the exact threshold value because this
2328	 * depends on the internal reservations requested by various
2329	 * operations, so some operations that consume a few metadata will
2330	 * operations, so some operations that consume a small amount of metadata will
2331	 * way around.
2332	 */
2333	thresh = SZ_4M;
2334
2335	/*
2336	 * We only want to claim there's no available space if we can no longer
2337	 * allocate chunks for our metadata profile and our global reserve will
2338	 * not fit in the free metadata space.  If we aren't ->full then we
2339	 * still can allocate chunks and thus are fine using the currently
2340	 * calculated f_bavail.
2341	 */
2342	if (!mixed && block_rsv->space_info->full &&
2343	    total_free_meta - thresh < block_rsv->size)
2344		buf->f_bavail = 0;
2345
2346	buf->f_type = BTRFS_SUPER_MAGIC;
2347	buf->f_bsize = dentry->d_sb->s_blocksize;
2348	buf->f_namelen = BTRFS_NAME_LEN;
2349
2350	/* We treat it as constant endianness (it doesn't matter _which_)
2351	   because we want the fsid to come out the same whether mounted
2352	   on a big-endian or little-endian host */
2353	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
2354	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
2355	/* Mask in the root object ID too, to disambiguate subvols */
2356	buf->f_fsid.val[0] ^=
2357		BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
2358	buf->f_fsid.val[1] ^=
2359		BTRFS_I(d_inode(dentry))->root->root_key.objectid;
2360
2361	return 0;
2362}
2363
2364static void btrfs_kill_super(struct super_block *sb)
2365{
2366	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2367	kill_anon_super(sb);
2368	btrfs_free_fs_info(fs_info);
2369}
2370
2371static struct file_system_type btrfs_fs_type = {
2372	.owner		= THIS_MODULE,
2373	.name		= "btrfs",
2374	.mount		= btrfs_mount,
2375	.kill_sb	= btrfs_kill_super,
2376	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2377};
2378
2379static struct file_system_type btrfs_root_fs_type = {
2380	.owner		= THIS_MODULE,
2381	.name		= "btrfs",
2382	.mount		= btrfs_mount_root,
2383	.kill_sb	= btrfs_kill_super,
2384	.fs_flags	= FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
2385};
2386
2387MODULE_ALIAS_FS("btrfs");
2388
2389static int btrfs_control_open(struct inode *inode, struct file *file)
2390{
2391	/*
2392	 * The control file's private_data is used to hold the
2393	 * transaction when it is started and is used to keep
2394	 * track of whether a transaction is already in progress.
2395	 */
2396	file->private_data = NULL;
2397	return 0;
2398}
2399
2400/*
2401 * Used by /dev/btrfs-control for devices ioctls.
2402 */
2403static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
2404				unsigned long arg)
2405{
2406	struct btrfs_ioctl_vol_args *vol;
2407	struct btrfs_device *device = NULL;
2408	int ret = -ENOTTY;
2409
2410	if (!capable(CAP_SYS_ADMIN))
2411		return -EPERM;
2412
2413	vol = memdup_user((void __user *)arg, sizeof(*vol));
2414	if (IS_ERR(vol))
2415		return PTR_ERR(vol);
2416	vol->name[BTRFS_PATH_NAME_MAX] = '\0';
2417
2418	switch (cmd) {
2419	case BTRFS_IOC_SCAN_DEV:
2420		mutex_lock(&uuid_mutex);
2421		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2422					       &btrfs_root_fs_type);
2423		ret = PTR_ERR_OR_ZERO(device);
2424		mutex_unlock(&uuid_mutex);
2425		break;
2426	case BTRFS_IOC_FORGET_DEV:
2427		ret = btrfs_forget_devices(vol->name);
2428		break;
2429	case BTRFS_IOC_DEVICES_READY:
2430		mutex_lock(&uuid_mutex);
2431		device = btrfs_scan_one_device(vol->name, FMODE_READ,
2432					       &btrfs_root_fs_type);
2433		if (IS_ERR(device)) {
2434			mutex_unlock(&uuid_mutex);
2435			ret = PTR_ERR(device);
2436			break;
2437		}
2438		ret = !(device->fs_devices->num_devices ==
2439			device->fs_devices->total_devices);
2440		mutex_unlock(&uuid_mutex);
2441		break;
2442	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
2443		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
2444		break;
2445	}
2446
2447	kfree(vol);
2448	return ret;
2449}
2450
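/*
 * Minimal userspace sketch (assumptions: standard UAPI headers available,
 * error handling omitted) of how btrfs-progs style tools drive the ioctls
 * handled above, e.g. registering a device with the module:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/btrfs.h>
 *
 *	int scan_device(const char *path)
 *	{
 *		struct btrfs_ioctl_vol_args args = { 0 };
 *		int fd = open("/dev/btrfs-control", O_RDWR);
 *
 *		strncpy(args.name, path, BTRFS_PATH_NAME_MAX);
 *		return ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);
 *	}
 */
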
2451static int btrfs_freeze(struct super_block *sb)
2452{
2453	struct btrfs_trans_handle *trans;
2454	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2455	struct btrfs_root *root = fs_info->tree_root;
2456
2457	set_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2458	/*
2459	 * We don't need a barrier here, we'll wait for any transaction that
2460	 * could be in progress on other threads (and do delayed iputs that
2461	 * we want to avoid on a frozen filesystem), or do the commit
2462	 * ourselves.
2463	 */
2464	trans = btrfs_attach_transaction_barrier(root);
2465	if (IS_ERR(trans)) {
2466		/* no transaction, don't bother */
2467		if (PTR_ERR(trans) == -ENOENT)
2468			return 0;
2469		return PTR_ERR(trans);
2470	}
2471	return btrfs_commit_transaction(trans);
2472}
2473
2474static int btrfs_unfreeze(struct super_block *sb)
2475{
2476	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2477
2478	clear_bit(BTRFS_FS_FROZEN, &fs_info->flags);
2479	return 0;
2480}
2481
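/*
 * Illustration only: the freeze/unfreeze callbacks above are normally reached
 * through the VFS FIFREEZE/FITHAW ioctls, e.g.
 *
 *   fsfreeze --freeze /mnt
 *   fsfreeze --unfreeze /mnt
 *
 * While the filesystem is frozen, btrfs_sync_fs() detects the state with
 * sb_start_write_trylock() and skips starting a new transaction, avoiding a
 * deadlock against the frozen superblock.
 */
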
2482static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
2483{
2484	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
2485	struct btrfs_device *dev, *first_dev = NULL;
2486
2487	/*
2488	 * Lightweight locking of the devices. We should not need
2489	 * device_list_mutex here as we only read the device data and the list
2490	 * is protected by RCU.  Even if a device is deleted during the list
2491	 * traversals, we'll get valid data, the freeing callback will wait at
2492	 * least until the rcu_read_unlock.
2493	 */
2494	rcu_read_lock();
2495	list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
2496		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
2497			continue;
2498		if (!dev->name)
2499			continue;
2500		if (!first_dev || dev->devid < first_dev->devid)
2501			first_dev = dev;
2502	}
2503
2504	if (first_dev)
2505		seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
2506	else
2507		WARN_ON(1);
2508	rcu_read_unlock();
2509	return 0;
2510}
2511
2512static const struct super_operations btrfs_super_ops = {
2513	.drop_inode	= btrfs_drop_inode,
2514	.evict_inode	= btrfs_evict_inode,
2515	.put_super	= btrfs_put_super,
2516	.sync_fs	= btrfs_sync_fs,
2517	.show_options	= btrfs_show_options,
2518	.show_devname	= btrfs_show_devname,
2519	.alloc_inode	= btrfs_alloc_inode,
2520	.destroy_inode	= btrfs_destroy_inode,
2521	.free_inode	= btrfs_free_inode,
2522	.statfs		= btrfs_statfs,
2523	.remount_fs	= btrfs_remount,
2524	.freeze_fs	= btrfs_freeze,
2525	.unfreeze_fs	= btrfs_unfreeze,
2526};
2527
2528static const struct file_operations btrfs_ctl_fops = {
2529	.open = btrfs_control_open,
2530	.unlocked_ioctl	 = btrfs_control_ioctl,
2531	.compat_ioctl = compat_ptr_ioctl,
2532	.owner	 = THIS_MODULE,
2533	.llseek = noop_llseek,
2534};
2535
2536static struct miscdevice btrfs_misc = {
2537	.minor		= BTRFS_MINOR,
2538	.name		= "btrfs-control",
2539	.fops		= &btrfs_ctl_fops
2540};
2541
2542MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
2543MODULE_ALIAS("devname:btrfs-control");
2544
2545static int __init btrfs_interface_init(void)
2546{
2547	return misc_register(&btrfs_misc);
2548}
2549
2550static __cold void btrfs_interface_exit(void)
2551{
2552	misc_deregister(&btrfs_misc);
2553}
2554
2555static void __init btrfs_print_mod_info(void)
2556{
2557	static const char options[] = ""
2558#ifdef CONFIG_BTRFS_DEBUG
2559			", debug=on"
2560#endif
2561#ifdef CONFIG_BTRFS_ASSERT
2562			", assert=on"
2563#endif
2564#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2565			", integrity-checker=on"
2566#endif
2567#ifdef CONFIG_BTRFS_FS_REF_VERIFY
2568			", ref-verify=on"
2569#endif
2570#ifdef CONFIG_BLK_DEV_ZONED
2571			", zoned=yes"
2572#else
2573			", zoned=no"
2574#endif
2575			;
2576	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
2577}
2578
2579static int __init init_btrfs_fs(void)
2580{
2581	int err;
2582
2583	btrfs_props_init();
2584
2585	err = btrfs_init_sysfs();
2586	if (err)
2587		return err;
2588
2589	btrfs_init_compress();
2590
2591	err = btrfs_init_cachep();
2592	if (err)
2593		goto free_compress;
2594
2595	err = extent_io_init();
2596	if (err)
2597		goto free_cachep;
2598
2599	err = extent_state_cache_init();
2600	if (err)
2601		goto free_extent_io;
2602
2603	err = extent_map_init();
2604	if (err)
2605		goto free_extent_state_cache;
2606
2607	err = ordered_data_init();
2608	if (err)
2609		goto free_extent_map;
2610
2611	err = btrfs_delayed_inode_init();
2612	if (err)
2613		goto free_ordered_data;
2614
2615	err = btrfs_auto_defrag_init();
2616	if (err)
2617		goto free_delayed_inode;
2618
2619	err = btrfs_delayed_ref_init();
2620	if (err)
2621		goto free_auto_defrag;
2622
2623	err = btrfs_prelim_ref_init();
2624	if (err)
2625		goto free_delayed_ref;
2626
2627	err = btrfs_end_io_wq_init();
2628	if (err)
2629		goto free_prelim_ref;
2630
2631	err = btrfs_interface_init();
2632	if (err)
2633		goto free_end_io_wq;
2634
2635	btrfs_print_mod_info();
2636
2637	err = btrfs_run_sanity_tests();
2638	if (err)
2639		goto unregister_ioctl;
2640
2641	err = register_filesystem(&btrfs_fs_type);
2642	if (err)
2643		goto unregister_ioctl;
2644
2645	return 0;
2646
2647unregister_ioctl:
2648	btrfs_interface_exit();
2649free_end_io_wq:
2650	btrfs_end_io_wq_exit();
2651free_prelim_ref:
2652	btrfs_prelim_ref_exit();
2653free_delayed_ref:
2654	btrfs_delayed_ref_exit();
2655free_auto_defrag:
2656	btrfs_auto_defrag_exit();
2657free_delayed_inode:
2658	btrfs_delayed_inode_exit();
2659free_ordered_data:
2660	ordered_data_exit();
2661free_extent_map:
2662	extent_map_exit();
2663free_extent_state_cache:
2664	extent_state_cache_exit();
2665free_extent_io:
2666	extent_io_exit();
2667free_cachep:
2668	btrfs_destroy_cachep();
2669free_compress:
2670	btrfs_exit_compress();
2671	btrfs_exit_sysfs();
2672
2673	return err;
2674}
2675
2676static void __exit exit_btrfs_fs(void)
2677{
2678	btrfs_destroy_cachep();
2679	btrfs_delayed_ref_exit();
2680	btrfs_auto_defrag_exit();
2681	btrfs_delayed_inode_exit();
2682	btrfs_prelim_ref_exit();
2683	ordered_data_exit();
2684	extent_map_exit();
2685	extent_state_cache_exit();
2686	extent_io_exit();
2687	btrfs_interface_exit();
2688	btrfs_end_io_wq_exit();
2689	unregister_filesystem(&btrfs_fs_type);
2690	btrfs_exit_sysfs();
2691	btrfs_cleanup_fs_uuids();
2692	btrfs_exit_compress();
2693}
2694
2695late_initcall(init_btrfs_fs);
2696module_exit(exit_btrfs_fs)
2697
2698MODULE_LICENSE("GPL");
2699MODULE_SOFTDEP("pre: crc32c");
2700MODULE_SOFTDEP("pre: xxhash64");
2701MODULE_SOFTDEP("pre: sha256");
2702MODULE_SOFTDEP("pre: blake2b-256");