   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/super.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/module.h>
   9#include <linux/init.h>
  10#include <linux/fs.h>
  11#include <linux/fs_context.h>
  12#include <linux/sched/mm.h>
  13#include <linux/statfs.h>
  14#include <linux/buffer_head.h>
  15#include <linux/kthread.h>
  16#include <linux/parser.h>
  17#include <linux/mount.h>
  18#include <linux/seq_file.h>
  19#include <linux/proc_fs.h>
  20#include <linux/random.h>
  21#include <linux/exportfs.h>
  22#include <linux/blkdev.h>
  23#include <linux/quotaops.h>
  24#include <linux/f2fs_fs.h>
  25#include <linux/sysfs.h>
  26#include <linux/quota.h>
  27#include <linux/unicode.h>
  28#include <linux/part_stat.h>
  29#include <linux/zstd.h>
  30#include <linux/lz4.h>
  31
  32#include "f2fs.h"
  33#include "node.h"
  34#include "segment.h"
  35#include "xattr.h"
  36#include "gc.h"
  37#include "iostat.h"
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/f2fs.h>
  41
  42static struct kmem_cache *f2fs_inode_cachep;
  43
  44#ifdef CONFIG_F2FS_FAULT_INJECTION
  45
  46const char *f2fs_fault_name[FAULT_MAX] = {
  47	[FAULT_KMALLOC]		= "kmalloc",
  48	[FAULT_KVMALLOC]	= "kvmalloc",
  49	[FAULT_PAGE_ALLOC]	= "page alloc",
  50	[FAULT_PAGE_GET]	= "page get",
  51	[FAULT_ALLOC_NID]	= "alloc nid",
  52	[FAULT_ORPHAN]		= "orphan",
  53	[FAULT_BLOCK]		= "no more block",
  54	[FAULT_DIR_DEPTH]	= "too big dir depth",
  55	[FAULT_EVICT_INODE]	= "evict_inode fail",
  56	[FAULT_TRUNCATE]	= "truncate fail",
  57	[FAULT_READ_IO]		= "read IO error",
  58	[FAULT_CHECKPOINT]	= "checkpoint error",
  59	[FAULT_DISCARD]		= "discard error",
  60	[FAULT_WRITE_IO]	= "write IO error",
  61	[FAULT_SLAB_ALLOC]	= "slab alloc",
  62	[FAULT_DQUOT_INIT]	= "dquot initialize",
  63	[FAULT_LOCK_OP]		= "lock_op",
  64	[FAULT_BLKADDR]		= "invalid blkaddr",
  65};
  66
  67void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
  68							unsigned int type)
  69{
  70	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
  71
  72	if (rate) {
  73		atomic_set(&ffi->inject_ops, 0);
  74		ffi->inject_rate = rate;
  75	}
  76
  77	if (type)
  78		ffi->inject_type = type;
  79
  80	if (!rate && !type)
  81		memset(ffi, 0, sizeof(struct f2fs_fault_info));
  82}
  83#endif
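/*
 * Illustrative note: f2fs_build_fault_attr() above is driven by the
 * "fault_injection=<rate>" and "fault_type=<type>" mount options parsed in
 * parse_options() below; calling it with rate == 0 and type == 0 clears the
 * whole fault_info state again.
 */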
  84
  85/* f2fs-wide shrinker description */
  86static struct shrinker *f2fs_shrinker_info;
  87
  88static int __init f2fs_init_shrinker(void)
  89{
  90	f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
  91	if (!f2fs_shrinker_info)
  92		return -ENOMEM;
  93
  94	f2fs_shrinker_info->count_objects = f2fs_shrink_count;
  95	f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
  96
  97	shrinker_register(f2fs_shrinker_info);
  98
  99	return 0;
 100}
 101
 102static void f2fs_exit_shrinker(void)
 103{
 104	shrinker_free(f2fs_shrinker_info);
 105}
 106
 107enum {
 108	Opt_gc_background,
 109	Opt_disable_roll_forward,
 110	Opt_norecovery,
 111	Opt_discard,
 112	Opt_nodiscard,
 113	Opt_noheap,
 114	Opt_heap,
 115	Opt_user_xattr,
 116	Opt_nouser_xattr,
 117	Opt_acl,
 118	Opt_noacl,
 119	Opt_active_logs,
 120	Opt_disable_ext_identify,
 121	Opt_inline_xattr,
 122	Opt_noinline_xattr,
 123	Opt_inline_xattr_size,
 124	Opt_inline_data,
 125	Opt_inline_dentry,
 126	Opt_noinline_dentry,
 127	Opt_flush_merge,
 128	Opt_noflush_merge,
 129	Opt_barrier,
 130	Opt_nobarrier,
 131	Opt_fastboot,
 132	Opt_extent_cache,
 133	Opt_noextent_cache,
 134	Opt_noinline_data,
 135	Opt_data_flush,
 136	Opt_reserve_root,
 137	Opt_resgid,
 138	Opt_resuid,
 139	Opt_mode,
 140	Opt_io_size_bits,
 141	Opt_fault_injection,
 142	Opt_fault_type,
 143	Opt_lazytime,
 144	Opt_nolazytime,
 145	Opt_quota,
 146	Opt_noquota,
 147	Opt_usrquota,
 148	Opt_grpquota,
 149	Opt_prjquota,
 150	Opt_usrjquota,
 151	Opt_grpjquota,
 152	Opt_prjjquota,
 153	Opt_offusrjquota,
 154	Opt_offgrpjquota,
 155	Opt_offprjjquota,
 156	Opt_jqfmt_vfsold,
 157	Opt_jqfmt_vfsv0,
 158	Opt_jqfmt_vfsv1,
 159	Opt_alloc,
 160	Opt_fsync,
 161	Opt_test_dummy_encryption,
 162	Opt_inlinecrypt,
 163	Opt_checkpoint_disable,
 164	Opt_checkpoint_disable_cap,
 165	Opt_checkpoint_disable_cap_perc,
 166	Opt_checkpoint_enable,
 167	Opt_checkpoint_merge,
 168	Opt_nocheckpoint_merge,
 169	Opt_compress_algorithm,
 170	Opt_compress_log_size,
 171	Opt_compress_extension,
 172	Opt_nocompress_extension,
 173	Opt_compress_chksum,
 174	Opt_compress_mode,
 175	Opt_compress_cache,
 176	Opt_atgc,
 177	Opt_gc_merge,
 178	Opt_nogc_merge,
 179	Opt_discard_unit,
 180	Opt_memory_mode,
 181	Opt_age_extent_cache,
 182	Opt_errors,
 183	Opt_err,
 184};
 185
 186static match_table_t f2fs_tokens = {
 187	{Opt_gc_background, "background_gc=%s"},
 188	{Opt_disable_roll_forward, "disable_roll_forward"},
 189	{Opt_norecovery, "norecovery"},
 190	{Opt_discard, "discard"},
 191	{Opt_nodiscard, "nodiscard"},
 192	{Opt_noheap, "no_heap"},
 193	{Opt_heap, "heap"},
 194	{Opt_user_xattr, "user_xattr"},
 195	{Opt_nouser_xattr, "nouser_xattr"},
 196	{Opt_acl, "acl"},
 197	{Opt_noacl, "noacl"},
 198	{Opt_active_logs, "active_logs=%u"},
 199	{Opt_disable_ext_identify, "disable_ext_identify"},
 200	{Opt_inline_xattr, "inline_xattr"},
 201	{Opt_noinline_xattr, "noinline_xattr"},
 202	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
 203	{Opt_inline_data, "inline_data"},
 204	{Opt_inline_dentry, "inline_dentry"},
 205	{Opt_noinline_dentry, "noinline_dentry"},
 206	{Opt_flush_merge, "flush_merge"},
 207	{Opt_noflush_merge, "noflush_merge"},
 208	{Opt_barrier, "barrier"},
 209	{Opt_nobarrier, "nobarrier"},
 210	{Opt_fastboot, "fastboot"},
 211	{Opt_extent_cache, "extent_cache"},
 212	{Opt_noextent_cache, "noextent_cache"},
 213	{Opt_noinline_data, "noinline_data"},
 214	{Opt_data_flush, "data_flush"},
 215	{Opt_reserve_root, "reserve_root=%u"},
 216	{Opt_resgid, "resgid=%u"},
 217	{Opt_resuid, "resuid=%u"},
 218	{Opt_mode, "mode=%s"},
 219	{Opt_io_size_bits, "io_bits=%u"},
 220	{Opt_fault_injection, "fault_injection=%u"},
 221	{Opt_fault_type, "fault_type=%u"},
 222	{Opt_lazytime, "lazytime"},
 223	{Opt_nolazytime, "nolazytime"},
 224	{Opt_quota, "quota"},
 225	{Opt_noquota, "noquota"},
 226	{Opt_usrquota, "usrquota"},
 227	{Opt_grpquota, "grpquota"},
 228	{Opt_prjquota, "prjquota"},
 229	{Opt_usrjquota, "usrjquota=%s"},
 230	{Opt_grpjquota, "grpjquota=%s"},
 231	{Opt_prjjquota, "prjjquota=%s"},
 232	{Opt_offusrjquota, "usrjquota="},
 233	{Opt_offgrpjquota, "grpjquota="},
 234	{Opt_offprjjquota, "prjjquota="},
 235	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
 236	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
 237	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
 238	{Opt_alloc, "alloc_mode=%s"},
 239	{Opt_fsync, "fsync_mode=%s"},
 240	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
 241	{Opt_test_dummy_encryption, "test_dummy_encryption"},
 242	{Opt_inlinecrypt, "inlinecrypt"},
 243	{Opt_checkpoint_disable, "checkpoint=disable"},
 244	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
 245	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
 246	{Opt_checkpoint_enable, "checkpoint=enable"},
 247	{Opt_checkpoint_merge, "checkpoint_merge"},
 248	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
 249	{Opt_compress_algorithm, "compress_algorithm=%s"},
 250	{Opt_compress_log_size, "compress_log_size=%u"},
 251	{Opt_compress_extension, "compress_extension=%s"},
 252	{Opt_nocompress_extension, "nocompress_extension=%s"},
 253	{Opt_compress_chksum, "compress_chksum"},
 254	{Opt_compress_mode, "compress_mode=%s"},
 255	{Opt_compress_cache, "compress_cache"},
 256	{Opt_atgc, "atgc"},
 257	{Opt_gc_merge, "gc_merge"},
 258	{Opt_nogc_merge, "nogc_merge"},
 259	{Opt_discard_unit, "discard_unit=%s"},
 260	{Opt_memory_mode, "memory=%s"},
 261	{Opt_age_extent_cache, "age_extent_cache"},
 262	{Opt_errors, "errors=%s"},
 263	{Opt_err, NULL},
 264};
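/*
 * Illustrative example: a mount command such as
 *   mount -t f2fs -o background_gc=on,discard,compress_algorithm=lz4 /dev/sdX /mnt
 * ("/dev/sdX" being a placeholder) is split on ',' in parse_options() below,
 * and each token is matched against this table to set the corresponding
 * F2FS_OPTION()/set_opt() state.
 */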
 265
 266void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
 267{
 268	struct va_format vaf;
 269	va_list args;
 270	int level;
 271
 272	va_start(args, fmt);
 273
 274	level = printk_get_level(fmt);
 275	vaf.fmt = printk_skip_level(fmt);
 276	vaf.va = &args;
 277	printk("%c%cF2FS-fs (%s): %pV\n",
 278	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
 279
 280	va_end(args);
 281}
 282
 283#if IS_ENABLED(CONFIG_UNICODE)
 284static const struct f2fs_sb_encodings {
 285	__u16 magic;
 286	char *name;
 287	unsigned int version;
 288} f2fs_sb_encoding_map[] = {
 289	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
 290};
 291
 292static const struct f2fs_sb_encodings *
 293f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
 294{
 295	__u16 magic = le16_to_cpu(sb->s_encoding);
 296	int i;
 297
 298	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
 299		if (magic == f2fs_sb_encoding_map[i].magic)
 300			return &f2fs_sb_encoding_map[i];
 301
 302	return NULL;
 303}
 304
 305struct kmem_cache *f2fs_cf_name_slab;
 306static int __init f2fs_create_casefold_cache(void)
 307{
 308	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
 309							F2FS_NAME_LEN);
 310	return f2fs_cf_name_slab ? 0 : -ENOMEM;
 311}
 312
 313static void f2fs_destroy_casefold_cache(void)
 314{
 315	kmem_cache_destroy(f2fs_cf_name_slab);
 316}
 317#else
 318static int __init f2fs_create_casefold_cache(void) { return 0; }
 319static void f2fs_destroy_casefold_cache(void) { }
 320#endif
 321
 322static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
 323{
 324	block_t limit = min((sbi->user_block_count >> 3),
 325			sbi->user_block_count - sbi->reserved_blocks);
 326
 327	/* limit is 12.5% */
 328	if (test_opt(sbi, RESERVE_ROOT) &&
 329			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
 330		F2FS_OPTION(sbi).root_reserved_blocks = limit;
 331		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
 332			  F2FS_OPTION(sbi).root_reserved_blocks);
 333	}
 334	if (!test_opt(sbi, RESERVE_ROOT) &&
 335		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
 336				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
 337		!gid_eq(F2FS_OPTION(sbi).s_resgid,
 338				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
 339		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
 340			  from_kuid_munged(&init_user_ns,
 341					   F2FS_OPTION(sbi).s_resuid),
 342			  from_kgid_munged(&init_user_ns,
 343					   F2FS_OPTION(sbi).s_resgid));
 344}
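/*
 * Worked example (illustrative): with user_block_count == 8,000,000 blocks
 * and only a small number of reserved blocks, the limit above is
 * 8,000,000 >> 3 == 1,000,000 blocks (12.5%), so any larger reserve_root=
 * value is clamped to that.
 */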
 345
 346static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
 347{
 348	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
 349	unsigned int avg_vblocks;
 350	unsigned int wanted_reserved_segments;
 351	block_t avail_user_block_count;
 352
 353	if (!F2FS_IO_ALIGNED(sbi))
 354		return 0;
 355
 356	/* average valid block count in section in worst case */
 357	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);
 358
 359	/*
 360	 * we need enough free space when migrating one section in worst case
 361	 */
 362	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
 363						reserved_segments(sbi);
 364	wanted_reserved_segments -= reserved_segments(sbi);
 365
 366	avail_user_block_count = sbi->user_block_count -
 367				sbi->current_reserved_blocks -
 368				F2FS_OPTION(sbi).root_reserved_blocks;
 369
 370	if (wanted_reserved_segments * sbi->blocks_per_seg >
 371					avail_user_block_count) {
 372		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
 373			wanted_reserved_segments,
 374			avail_user_block_count >> sbi->log_blocks_per_seg);
 375		return -ENOSPC;
 376	}
 377
 378	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;
 379
 380	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
 381			 wanted_reserved_segments);
 382
 383	return 0;
 384}
 385
 386static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
 387{
 388	if (!F2FS_OPTION(sbi).unusable_cap_perc)
 389		return;
 390
 391	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
 392		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
 393	else
 394		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
 395					F2FS_OPTION(sbi).unusable_cap_perc;
 396
 397	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
 398			F2FS_OPTION(sbi).unusable_cap,
 399			F2FS_OPTION(sbi).unusable_cap_perc);
 400}
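/*
 * Worked example (illustrative): with "checkpoint=disable:10%" and
 * user_block_count == 1,000,000 blocks, the code above sets unusable_cap to
 * (1,000,000 / 100) * 10 == 100,000 blocks.
 */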
 401
 402static void init_once(void *foo)
 403{
 404	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
 405
 406	inode_init_once(&fi->vfs_inode);
 407}
 408
 409#ifdef CONFIG_QUOTA
 410static const char * const quotatypes[] = INITQFNAMES;
 411#define QTYPE2NAME(t) (quotatypes[t])
 412static int f2fs_set_qf_name(struct super_block *sb, int qtype,
 413							substring_t *args)
 414{
 415	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 416	char *qname;
 417	int ret = -EINVAL;
 418
 419	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
 420		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
 421		return -EINVAL;
 422	}
 423	if (f2fs_sb_has_quota_ino(sbi)) {
 424		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
 425		return 0;
 426	}
 427
 428	qname = match_strdup(args);
 429	if (!qname) {
 430		f2fs_err(sbi, "Not enough memory for storing quotafile name");
 431		return -ENOMEM;
 432	}
 433	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
 434		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
 435			ret = 0;
 436		else
 437			f2fs_err(sbi, "%s quota file already specified",
 438				 QTYPE2NAME(qtype));
 439		goto errout;
 440	}
 441	if (strchr(qname, '/')) {
 442		f2fs_err(sbi, "quotafile must be on filesystem root");
 443		goto errout;
 444	}
 445	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
 446	set_opt(sbi, QUOTA);
 447	return 0;
 448errout:
 449	kfree(qname);
 450	return ret;
 451}
 452
 453static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
 454{
 455	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 456
 457	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
 458		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
 459		return -EINVAL;
 460	}
 461	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
 462	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
 463	return 0;
 464}
 465
 466static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
 467{
 468	/*
 469	 * We do the test below only for project quotas. 'usrquota' and
 470	 * 'grpquota' mount options are allowed even without quota feature
 471	 * to support legacy quotas in quota files.
 472	 */
 473	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
 474		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
 475		return -1;
 476	}
 477	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
 478			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
 479			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
 480		if (test_opt(sbi, USRQUOTA) &&
 481				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
 482			clear_opt(sbi, USRQUOTA);
 483
 484		if (test_opt(sbi, GRPQUOTA) &&
 485				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
 486			clear_opt(sbi, GRPQUOTA);
 487
 488		if (test_opt(sbi, PRJQUOTA) &&
 489				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
 490			clear_opt(sbi, PRJQUOTA);
 491
 492		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
 493				test_opt(sbi, PRJQUOTA)) {
 494			f2fs_err(sbi, "old and new quota format mixing");
 495			return -1;
 496		}
 497
 498		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
 499			f2fs_err(sbi, "journaled quota format not specified");
 500			return -1;
 501		}
 502	}
 503
 504	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
 505		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
 506		F2FS_OPTION(sbi).s_jquota_fmt = 0;
 507	}
 508	return 0;
 509}
 510#endif
 511
 512static int f2fs_set_test_dummy_encryption(struct super_block *sb,
 513					  const char *opt,
 514					  const substring_t *arg,
 515					  bool is_remount)
 516{
 517	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 518	struct fs_parameter param = {
 519		.type = fs_value_is_string,
 520		.string = arg->from ? arg->from : "",
 521	};
 522	struct fscrypt_dummy_policy *policy =
 523		&F2FS_OPTION(sbi).dummy_enc_policy;
 524	int err;
 525
 526	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
 527		f2fs_warn(sbi, "test_dummy_encryption option not supported");
 528		return -EINVAL;
 529	}
 530
 531	if (!f2fs_sb_has_encrypt(sbi)) {
 532		f2fs_err(sbi, "Encrypt feature is off");
 533		return -EINVAL;
 534	}
 535
 536	/*
 537	 * This mount option is just for testing, and it's not worthwhile to
 538	 * implement the extra complexity (e.g. RCU protection) that would be
 539	 * needed to allow it to be set or changed during remount.  We do allow
 540	 * it to be specified during remount, but only if there is no change.
 541	 */
 542	if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
 543		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
 544		return -EINVAL;
 545	}
 546
 547	err = fscrypt_parse_test_dummy_encryption(&param, policy);
 548	if (err) {
 549		if (err == -EEXIST)
 550			f2fs_warn(sbi,
 551				  "Can't change test_dummy_encryption on remount");
 552		else if (err == -EINVAL)
 553			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
 554				  opt);
 555		else
 556			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
 557				  opt, err);
 558		return -EINVAL;
 559	}
 560	f2fs_warn(sbi, "Test dummy encryption mode enabled");
 561	return 0;
 562}
 563
 564#ifdef CONFIG_F2FS_FS_COMPRESSION
 565static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
 566					const char *new_ext, bool is_ext)
 567{
 568	unsigned char (*ext)[F2FS_EXTENSION_LEN];
 569	int ext_cnt;
 570	int i;
 571
 572	if (is_ext) {
 573		ext = F2FS_OPTION(sbi).extensions;
 574		ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
 575	} else {
 576		ext = F2FS_OPTION(sbi).noextensions;
 577		ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
 578	}
 579
 580	for (i = 0; i < ext_cnt; i++) {
 581		if (!strcasecmp(new_ext, ext[i]))
 582			return true;
 583	}
 584
 585	return false;
 586}
 587
 588/*
  589 * 1. The same extension name must not appear in both the compress and non-compress
  590 * extension lists at the same time.
  591 * 2. If the compress extension specifies all files, the types specified by the non-compress
  592 * extension will be treated as special cases and will not be compressed.
  593 * 3. Don't allow the non-compress extension to specify all files.
 594 */
 595static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
 596{
 597	unsigned char (*ext)[F2FS_EXTENSION_LEN];
 598	unsigned char (*noext)[F2FS_EXTENSION_LEN];
 599	int ext_cnt, noext_cnt, index = 0, no_index = 0;
 600
 601	ext = F2FS_OPTION(sbi).extensions;
 602	ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
 603	noext = F2FS_OPTION(sbi).noextensions;
 604	noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
 605
 606	if (!noext_cnt)
 607		return 0;
 608
 609	for (no_index = 0; no_index < noext_cnt; no_index++) {
 610		if (!strcasecmp("*", noext[no_index])) {
 611			f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
 612			return -EINVAL;
 613		}
 614		for (index = 0; index < ext_cnt; index++) {
 615			if (!strcasecmp(ext[index], noext[no_index])) {
 616				f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
 617						ext[index]);
 618				return -EINVAL;
 619			}
 620		}
 621	}
 622	return 0;
 623}
 624
 625#ifdef CONFIG_F2FS_FS_LZ4
 626static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
 627{
 628#ifdef CONFIG_F2FS_FS_LZ4HC
 629	unsigned int level;
 630
 631	if (strlen(str) == 3) {
 632		F2FS_OPTION(sbi).compress_level = 0;
 633		return 0;
 634	}
 635
 636	str += 3;
 637
 638	if (str[0] != ':') {
 639		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
 640		return -EINVAL;
 641	}
 642	if (kstrtouint(str + 1, 10, &level))
 643		return -EINVAL;
 644
 645	if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
 646		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
 647		return -EINVAL;
 648	}
 649
 650	F2FS_OPTION(sbi).compress_level = level;
 651	return 0;
 652#else
 653	if (strlen(str) == 3) {
 654		F2FS_OPTION(sbi).compress_level = 0;
 655		return 0;
 656	}
 657	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
 658	return -EINVAL;
 659#endif
 660}
 661#endif
 662
 663#ifdef CONFIG_F2FS_FS_ZSTD
 664static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
 665{
 666	unsigned int level;
 667	int len = 4;
 668
 669	if (strlen(str) == len) {
 670		F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
 671		return 0;
 672	}
 673
 674	str += len;
 675
 676	if (str[0] != ':') {
 677		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
 678		return -EINVAL;
 679	}
 680	if (kstrtouint(str + 1, 10, &level))
 681		return -EINVAL;
 682
 683	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
 684		f2fs_info(sbi, "invalid zstd compress level: %d", level);
 685		return -EINVAL;
 686	}
 687
 688	F2FS_OPTION(sbi).compress_level = level;
 689	return 0;
 690}
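/*
 * Illustrative example: "compress_algorithm=zstd:6" reaches
 * f2fs_set_zstd_level() above with str == "zstd:6"; the substring after the
 * ':' is parsed with kstrtouint() and, if valid, stored in compress_level,
 * while a bare "zstd" falls back to F2FS_ZSTD_DEFAULT_CLEVEL.
 */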
 691#endif
 692#endif
 693
 694static int parse_options(struct super_block *sb, char *options, bool is_remount)
 695{
 696	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 697	substring_t args[MAX_OPT_ARGS];
 698#ifdef CONFIG_F2FS_FS_COMPRESSION
 699	unsigned char (*ext)[F2FS_EXTENSION_LEN];
 700	unsigned char (*noext)[F2FS_EXTENSION_LEN];
 701	int ext_cnt, noext_cnt;
 702#endif
 703	char *p, *name;
 704	int arg = 0;
 705	kuid_t uid;
 706	kgid_t gid;
 707	int ret;
 708
 709	if (!options)
 710		goto default_check;
 711
 712	while ((p = strsep(&options, ",")) != NULL) {
 713		int token;
 714
 715		if (!*p)
 716			continue;
 717		/*
 718		 * Initialize args struct so we know whether arg was
 719		 * found; some options take optional arguments.
 720		 */
 721		args[0].to = args[0].from = NULL;
 722		token = match_token(p, f2fs_tokens, args);
 723
 724		switch (token) {
 725		case Opt_gc_background:
 726			name = match_strdup(&args[0]);
 727
 728			if (!name)
 729				return -ENOMEM;
 730			if (!strcmp(name, "on")) {
 731				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
 732			} else if (!strcmp(name, "off")) {
 733				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
 734			} else if (!strcmp(name, "sync")) {
 735				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
 736			} else {
 737				kfree(name);
 738				return -EINVAL;
 739			}
 740			kfree(name);
 741			break;
 742		case Opt_disable_roll_forward:
 743			set_opt(sbi, DISABLE_ROLL_FORWARD);
 744			break;
 745		case Opt_norecovery:
 746			/* this option mounts f2fs with ro */
 747			set_opt(sbi, NORECOVERY);
 748			if (!f2fs_readonly(sb))
 749				return -EINVAL;
 750			break;
 751		case Opt_discard:
 752			if (!f2fs_hw_support_discard(sbi)) {
 753				f2fs_warn(sbi, "device does not support discard");
 754				break;
 755			}
 756			set_opt(sbi, DISCARD);
 757			break;
 758		case Opt_nodiscard:
 759			if (f2fs_hw_should_discard(sbi)) {
 760				f2fs_warn(sbi, "discard is required for zoned block devices");
 761				return -EINVAL;
 762			}
 763			clear_opt(sbi, DISCARD);
 764			break;
 765		case Opt_noheap:
 766			set_opt(sbi, NOHEAP);
 767			break;
 768		case Opt_heap:
 769			clear_opt(sbi, NOHEAP);
 770			break;
 771#ifdef CONFIG_F2FS_FS_XATTR
 772		case Opt_user_xattr:
 773			set_opt(sbi, XATTR_USER);
 774			break;
 775		case Opt_nouser_xattr:
 776			clear_opt(sbi, XATTR_USER);
 777			break;
 778		case Opt_inline_xattr:
 779			set_opt(sbi, INLINE_XATTR);
 780			break;
 781		case Opt_noinline_xattr:
 782			clear_opt(sbi, INLINE_XATTR);
 783			break;
 784		case Opt_inline_xattr_size:
 785			if (args->from && match_int(args, &arg))
 786				return -EINVAL;
 787			set_opt(sbi, INLINE_XATTR_SIZE);
 788			F2FS_OPTION(sbi).inline_xattr_size = arg;
 789			break;
 790#else
 791		case Opt_user_xattr:
 792			f2fs_info(sbi, "user_xattr options not supported");
 793			break;
 794		case Opt_nouser_xattr:
 795			f2fs_info(sbi, "nouser_xattr options not supported");
 796			break;
 797		case Opt_inline_xattr:
 798			f2fs_info(sbi, "inline_xattr options not supported");
 799			break;
 800		case Opt_noinline_xattr:
 801			f2fs_info(sbi, "noinline_xattr options not supported");
 802			break;
 803#endif
 804#ifdef CONFIG_F2FS_FS_POSIX_ACL
 805		case Opt_acl:
 806			set_opt(sbi, POSIX_ACL);
 807			break;
 808		case Opt_noacl:
 809			clear_opt(sbi, POSIX_ACL);
 810			break;
 811#else
 812		case Opt_acl:
 813			f2fs_info(sbi, "acl options not supported");
 814			break;
 815		case Opt_noacl:
 816			f2fs_info(sbi, "noacl options not supported");
 817			break;
 818#endif
 819		case Opt_active_logs:
 820			if (args->from && match_int(args, &arg))
 821				return -EINVAL;
 822			if (arg != 2 && arg != 4 &&
 823				arg != NR_CURSEG_PERSIST_TYPE)
 824				return -EINVAL;
 825			F2FS_OPTION(sbi).active_logs = arg;
 826			break;
 827		case Opt_disable_ext_identify:
 828			set_opt(sbi, DISABLE_EXT_IDENTIFY);
 829			break;
 830		case Opt_inline_data:
 831			set_opt(sbi, INLINE_DATA);
 832			break;
 833		case Opt_inline_dentry:
 834			set_opt(sbi, INLINE_DENTRY);
 835			break;
 836		case Opt_noinline_dentry:
 837			clear_opt(sbi, INLINE_DENTRY);
 838			break;
 839		case Opt_flush_merge:
 840			set_opt(sbi, FLUSH_MERGE);
 841			break;
 842		case Opt_noflush_merge:
 843			clear_opt(sbi, FLUSH_MERGE);
 844			break;
 845		case Opt_nobarrier:
 846			set_opt(sbi, NOBARRIER);
 847			break;
 848		case Opt_barrier:
 849			clear_opt(sbi, NOBARRIER);
 850			break;
 851		case Opt_fastboot:
 852			set_opt(sbi, FASTBOOT);
 853			break;
 854		case Opt_extent_cache:
 855			set_opt(sbi, READ_EXTENT_CACHE);
 856			break;
 857		case Opt_noextent_cache:
 858			clear_opt(sbi, READ_EXTENT_CACHE);
 859			break;
 860		case Opt_noinline_data:
 861			clear_opt(sbi, INLINE_DATA);
 862			break;
 863		case Opt_data_flush:
 864			set_opt(sbi, DATA_FLUSH);
 865			break;
 866		case Opt_reserve_root:
 867			if (args->from && match_int(args, &arg))
 868				return -EINVAL;
 869			if (test_opt(sbi, RESERVE_ROOT)) {
 870				f2fs_info(sbi, "Preserve previous reserve_root=%u",
 871					  F2FS_OPTION(sbi).root_reserved_blocks);
 872			} else {
 873				F2FS_OPTION(sbi).root_reserved_blocks = arg;
 874				set_opt(sbi, RESERVE_ROOT);
 875			}
 876			break;
 877		case Opt_resuid:
 878			if (args->from && match_int(args, &arg))
 879				return -EINVAL;
 880			uid = make_kuid(current_user_ns(), arg);
 881			if (!uid_valid(uid)) {
 882				f2fs_err(sbi, "Invalid uid value %d", arg);
 883				return -EINVAL;
 884			}
 885			F2FS_OPTION(sbi).s_resuid = uid;
 886			break;
 887		case Opt_resgid:
 888			if (args->from && match_int(args, &arg))
 889				return -EINVAL;
 890			gid = make_kgid(current_user_ns(), arg);
 891			if (!gid_valid(gid)) {
 892				f2fs_err(sbi, "Invalid gid value %d", arg);
 893				return -EINVAL;
 894			}
 895			F2FS_OPTION(sbi).s_resgid = gid;
 896			break;
 897		case Opt_mode:
 898			name = match_strdup(&args[0]);
 899
 900			if (!name)
 901				return -ENOMEM;
 902			if (!strcmp(name, "adaptive")) {
 903				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
 904			} else if (!strcmp(name, "lfs")) {
 905				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
 906			} else if (!strcmp(name, "fragment:segment")) {
 907				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
 908			} else if (!strcmp(name, "fragment:block")) {
 909				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
 910			} else {
 911				kfree(name);
 912				return -EINVAL;
 913			}
 914			kfree(name);
 915			break;
 916		case Opt_io_size_bits:
 917			if (args->from && match_int(args, &arg))
 918				return -EINVAL;
 919			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
 920				f2fs_warn(sbi, "Not support %ld, larger than %d",
 921					BIT(arg), BIO_MAX_VECS);
 922				return -EINVAL;
 923			}
 924			F2FS_OPTION(sbi).write_io_size_bits = arg;
 925			break;
 926#ifdef CONFIG_F2FS_FAULT_INJECTION
 927		case Opt_fault_injection:
 928			if (args->from && match_int(args, &arg))
 929				return -EINVAL;
 930			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
 931			set_opt(sbi, FAULT_INJECTION);
 932			break;
 933
 934		case Opt_fault_type:
 935			if (args->from && match_int(args, &arg))
 936				return -EINVAL;
 937			f2fs_build_fault_attr(sbi, 0, arg);
 938			set_opt(sbi, FAULT_INJECTION);
 939			break;
 940#else
 941		case Opt_fault_injection:
 942			f2fs_info(sbi, "fault_injection options not supported");
 943			break;
 944
 945		case Opt_fault_type:
 946			f2fs_info(sbi, "fault_type options not supported");
 947			break;
 948#endif
 949		case Opt_lazytime:
 950			sb->s_flags |= SB_LAZYTIME;
 951			break;
 952		case Opt_nolazytime:
 953			sb->s_flags &= ~SB_LAZYTIME;
 954			break;
 955#ifdef CONFIG_QUOTA
 956		case Opt_quota:
 957		case Opt_usrquota:
 958			set_opt(sbi, USRQUOTA);
 959			break;
 960		case Opt_grpquota:
 961			set_opt(sbi, GRPQUOTA);
 962			break;
 963		case Opt_prjquota:
 964			set_opt(sbi, PRJQUOTA);
 965			break;
 966		case Opt_usrjquota:
 967			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
 968			if (ret)
 969				return ret;
 970			break;
 971		case Opt_grpjquota:
 972			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
 973			if (ret)
 974				return ret;
 975			break;
 976		case Opt_prjjquota:
 977			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
 978			if (ret)
 979				return ret;
 980			break;
 981		case Opt_offusrjquota:
 982			ret = f2fs_clear_qf_name(sb, USRQUOTA);
 983			if (ret)
 984				return ret;
 985			break;
 986		case Opt_offgrpjquota:
 987			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
 988			if (ret)
 989				return ret;
 990			break;
 991		case Opt_offprjjquota:
 992			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
 993			if (ret)
 994				return ret;
 995			break;
 996		case Opt_jqfmt_vfsold:
 997			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
 998			break;
 999		case Opt_jqfmt_vfsv0:
1000			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
1001			break;
1002		case Opt_jqfmt_vfsv1:
1003			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
1004			break;
1005		case Opt_noquota:
1006			clear_opt(sbi, QUOTA);
1007			clear_opt(sbi, USRQUOTA);
1008			clear_opt(sbi, GRPQUOTA);
1009			clear_opt(sbi, PRJQUOTA);
1010			break;
1011#else
1012		case Opt_quota:
1013		case Opt_usrquota:
1014		case Opt_grpquota:
1015		case Opt_prjquota:
1016		case Opt_usrjquota:
1017		case Opt_grpjquota:
1018		case Opt_prjjquota:
1019		case Opt_offusrjquota:
1020		case Opt_offgrpjquota:
1021		case Opt_offprjjquota:
1022		case Opt_jqfmt_vfsold:
1023		case Opt_jqfmt_vfsv0:
1024		case Opt_jqfmt_vfsv1:
1025		case Opt_noquota:
1026			f2fs_info(sbi, "quota operations not supported");
1027			break;
1028#endif
1029		case Opt_alloc:
1030			name = match_strdup(&args[0]);
1031			if (!name)
1032				return -ENOMEM;
1033
1034			if (!strcmp(name, "default")) {
1035				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
1036			} else if (!strcmp(name, "reuse")) {
1037				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
1038			} else {
1039				kfree(name);
1040				return -EINVAL;
1041			}
1042			kfree(name);
1043			break;
1044		case Opt_fsync:
1045			name = match_strdup(&args[0]);
1046			if (!name)
1047				return -ENOMEM;
1048			if (!strcmp(name, "posix")) {
1049				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
1050			} else if (!strcmp(name, "strict")) {
1051				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
1052			} else if (!strcmp(name, "nobarrier")) {
1053				F2FS_OPTION(sbi).fsync_mode =
1054							FSYNC_MODE_NOBARRIER;
1055			} else {
1056				kfree(name);
1057				return -EINVAL;
1058			}
1059			kfree(name);
1060			break;
1061		case Opt_test_dummy_encryption:
1062			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
1063							     is_remount);
1064			if (ret)
1065				return ret;
1066			break;
1067		case Opt_inlinecrypt:
1068#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
1069			sb->s_flags |= SB_INLINECRYPT;
1070#else
1071			f2fs_info(sbi, "inline encryption not supported");
1072#endif
1073			break;
1074		case Opt_checkpoint_disable_cap_perc:
1075			if (args->from && match_int(args, &arg))
1076				return -EINVAL;
1077			if (arg < 0 || arg > 100)
1078				return -EINVAL;
1079			F2FS_OPTION(sbi).unusable_cap_perc = arg;
1080			set_opt(sbi, DISABLE_CHECKPOINT);
1081			break;
1082		case Opt_checkpoint_disable_cap:
1083			if (args->from && match_int(args, &arg))
1084				return -EINVAL;
1085			F2FS_OPTION(sbi).unusable_cap = arg;
1086			set_opt(sbi, DISABLE_CHECKPOINT);
1087			break;
1088		case Opt_checkpoint_disable:
1089			set_opt(sbi, DISABLE_CHECKPOINT);
1090			break;
1091		case Opt_checkpoint_enable:
1092			clear_opt(sbi, DISABLE_CHECKPOINT);
1093			break;
1094		case Opt_checkpoint_merge:
1095			set_opt(sbi, MERGE_CHECKPOINT);
1096			break;
1097		case Opt_nocheckpoint_merge:
1098			clear_opt(sbi, MERGE_CHECKPOINT);
1099			break;
1100#ifdef CONFIG_F2FS_FS_COMPRESSION
1101		case Opt_compress_algorithm:
1102			if (!f2fs_sb_has_compression(sbi)) {
1103				f2fs_info(sbi, "Image doesn't support compression");
1104				break;
1105			}
1106			name = match_strdup(&args[0]);
1107			if (!name)
1108				return -ENOMEM;
1109			if (!strcmp(name, "lzo")) {
1110#ifdef CONFIG_F2FS_FS_LZO
1111				F2FS_OPTION(sbi).compress_level = 0;
1112				F2FS_OPTION(sbi).compress_algorithm =
1113								COMPRESS_LZO;
1114#else
1115				f2fs_info(sbi, "kernel doesn't support lzo compression");
1116#endif
1117			} else if (!strncmp(name, "lz4", 3)) {
1118#ifdef CONFIG_F2FS_FS_LZ4
1119				ret = f2fs_set_lz4hc_level(sbi, name);
1120				if (ret) {
1121					kfree(name);
1122					return -EINVAL;
1123				}
1124				F2FS_OPTION(sbi).compress_algorithm =
1125								COMPRESS_LZ4;
1126#else
1127				f2fs_info(sbi, "kernel doesn't support lz4 compression");
1128#endif
1129			} else if (!strncmp(name, "zstd", 4)) {
1130#ifdef CONFIG_F2FS_FS_ZSTD
1131				ret = f2fs_set_zstd_level(sbi, name);
1132				if (ret) {
1133					kfree(name);
1134					return -EINVAL;
1135				}
1136				F2FS_OPTION(sbi).compress_algorithm =
1137								COMPRESS_ZSTD;
1138#else
1139				f2fs_info(sbi, "kernel doesn't support zstd compression");
1140#endif
1141			} else if (!strcmp(name, "lzo-rle")) {
1142#ifdef CONFIG_F2FS_FS_LZORLE
1143				F2FS_OPTION(sbi).compress_level = 0;
1144				F2FS_OPTION(sbi).compress_algorithm =
1145								COMPRESS_LZORLE;
1146#else
1147				f2fs_info(sbi, "kernel doesn't support lzorle compression");
1148#endif
1149			} else {
1150				kfree(name);
1151				return -EINVAL;
1152			}
1153			kfree(name);
1154			break;
1155		case Opt_compress_log_size:
1156			if (!f2fs_sb_has_compression(sbi)) {
1157				f2fs_info(sbi, "Image doesn't support compression");
1158				break;
1159			}
1160			if (args->from && match_int(args, &arg))
1161				return -EINVAL;
1162			if (arg < MIN_COMPRESS_LOG_SIZE ||
1163				arg > MAX_COMPRESS_LOG_SIZE) {
1164				f2fs_err(sbi,
1165					"Compress cluster log size is out of range");
1166				return -EINVAL;
1167			}
1168			F2FS_OPTION(sbi).compress_log_size = arg;
1169			break;
1170		case Opt_compress_extension:
1171			if (!f2fs_sb_has_compression(sbi)) {
1172				f2fs_info(sbi, "Image doesn't support compression");
1173				break;
1174			}
1175			name = match_strdup(&args[0]);
1176			if (!name)
1177				return -ENOMEM;
1178
1179			ext = F2FS_OPTION(sbi).extensions;
1180			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
1181
1182			if (strlen(name) >= F2FS_EXTENSION_LEN ||
1183				ext_cnt >= COMPRESS_EXT_NUM) {
1184				f2fs_err(sbi,
1185					"invalid extension length/number");
1186				kfree(name);
1187				return -EINVAL;
1188			}
1189
1190			if (is_compress_extension_exist(sbi, name, true)) {
1191				kfree(name);
1192				break;
1193			}
1194
1195			strcpy(ext[ext_cnt], name);
1196			F2FS_OPTION(sbi).compress_ext_cnt++;
1197			kfree(name);
1198			break;
1199		case Opt_nocompress_extension:
1200			if (!f2fs_sb_has_compression(sbi)) {
1201				f2fs_info(sbi, "Image doesn't support compression");
1202				break;
1203			}
1204			name = match_strdup(&args[0]);
1205			if (!name)
1206				return -ENOMEM;
1207
1208			noext = F2FS_OPTION(sbi).noextensions;
1209			noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
1210
1211			if (strlen(name) >= F2FS_EXTENSION_LEN ||
1212				noext_cnt >= COMPRESS_EXT_NUM) {
1213				f2fs_err(sbi,
1214					"invalid extension length/number");
1215				kfree(name);
1216				return -EINVAL;
1217			}
1218
1219			if (is_compress_extension_exist(sbi, name, false)) {
1220				kfree(name);
1221				break;
1222			}
1223
1224			strcpy(noext[noext_cnt], name);
1225			F2FS_OPTION(sbi).nocompress_ext_cnt++;
1226			kfree(name);
1227			break;
1228		case Opt_compress_chksum:
1229			if (!f2fs_sb_has_compression(sbi)) {
1230				f2fs_info(sbi, "Image doesn't support compression");
1231				break;
1232			}
1233			F2FS_OPTION(sbi).compress_chksum = true;
1234			break;
1235		case Opt_compress_mode:
1236			if (!f2fs_sb_has_compression(sbi)) {
1237				f2fs_info(sbi, "Image doesn't support compression");
1238				break;
1239			}
1240			name = match_strdup(&args[0]);
1241			if (!name)
1242				return -ENOMEM;
1243			if (!strcmp(name, "fs")) {
1244				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
1245			} else if (!strcmp(name, "user")) {
1246				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
1247			} else {
1248				kfree(name);
1249				return -EINVAL;
1250			}
1251			kfree(name);
1252			break;
1253		case Opt_compress_cache:
1254			if (!f2fs_sb_has_compression(sbi)) {
1255				f2fs_info(sbi, "Image doesn't support compression");
1256				break;
1257			}
1258			set_opt(sbi, COMPRESS_CACHE);
1259			break;
1260#else
1261		case Opt_compress_algorithm:
1262		case Opt_compress_log_size:
1263		case Opt_compress_extension:
1264		case Opt_nocompress_extension:
1265		case Opt_compress_chksum:
1266		case Opt_compress_mode:
1267		case Opt_compress_cache:
1268			f2fs_info(sbi, "compression options not supported");
1269			break;
1270#endif
1271		case Opt_atgc:
1272			set_opt(sbi, ATGC);
1273			break;
1274		case Opt_gc_merge:
1275			set_opt(sbi, GC_MERGE);
1276			break;
1277		case Opt_nogc_merge:
1278			clear_opt(sbi, GC_MERGE);
1279			break;
1280		case Opt_discard_unit:
1281			name = match_strdup(&args[0]);
1282			if (!name)
1283				return -ENOMEM;
1284			if (!strcmp(name, "block")) {
1285				F2FS_OPTION(sbi).discard_unit =
1286						DISCARD_UNIT_BLOCK;
1287			} else if (!strcmp(name, "segment")) {
1288				F2FS_OPTION(sbi).discard_unit =
1289						DISCARD_UNIT_SEGMENT;
1290			} else if (!strcmp(name, "section")) {
1291				F2FS_OPTION(sbi).discard_unit =
1292						DISCARD_UNIT_SECTION;
1293			} else {
1294				kfree(name);
1295				return -EINVAL;
1296			}
1297			kfree(name);
1298			break;
1299		case Opt_memory_mode:
1300			name = match_strdup(&args[0]);
1301			if (!name)
1302				return -ENOMEM;
1303			if (!strcmp(name, "normal")) {
1304				F2FS_OPTION(sbi).memory_mode =
1305						MEMORY_MODE_NORMAL;
1306			} else if (!strcmp(name, "low")) {
1307				F2FS_OPTION(sbi).memory_mode =
1308						MEMORY_MODE_LOW;
1309			} else {
1310				kfree(name);
1311				return -EINVAL;
1312			}
1313			kfree(name);
1314			break;
1315		case Opt_age_extent_cache:
1316			set_opt(sbi, AGE_EXTENT_CACHE);
1317			break;
1318		case Opt_errors:
1319			name = match_strdup(&args[0]);
1320			if (!name)
1321				return -ENOMEM;
1322			if (!strcmp(name, "remount-ro")) {
1323				F2FS_OPTION(sbi).errors =
1324						MOUNT_ERRORS_READONLY;
1325			} else if (!strcmp(name, "continue")) {
1326				F2FS_OPTION(sbi).errors =
1327						MOUNT_ERRORS_CONTINUE;
1328			} else if (!strcmp(name, "panic")) {
1329				F2FS_OPTION(sbi).errors =
1330						MOUNT_ERRORS_PANIC;
1331			} else {
1332				kfree(name);
1333				return -EINVAL;
1334			}
1335			kfree(name);
1336			break;
1337		default:
1338			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
1339				 p);
1340			return -EINVAL;
1341		}
1342	}
1343default_check:
1344#ifdef CONFIG_QUOTA
1345	if (f2fs_check_quota_options(sbi))
1346		return -EINVAL;
1347#else
1348	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
1349		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1350		return -EINVAL;
1351	}
1352	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
1353		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
1354		return -EINVAL;
1355	}
1356#endif
1357#if !IS_ENABLED(CONFIG_UNICODE)
1358	if (f2fs_sb_has_casefold(sbi)) {
1359		f2fs_err(sbi,
1360			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
1361		return -EINVAL;
1362	}
1363#endif
1364	/*
1365	 * The BLKZONED feature indicates that the drive was formatted with
1366	 * zone alignment optimization. This is optional for host-aware
1367	 * devices, but mandatory for host-managed zoned block devices.
1368	 */
1369	if (f2fs_sb_has_blkzoned(sbi)) {
1370#ifdef CONFIG_BLK_DEV_ZONED
1371		if (F2FS_OPTION(sbi).discard_unit !=
1372						DISCARD_UNIT_SECTION) {
1373			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
1374			F2FS_OPTION(sbi).discard_unit =
1375					DISCARD_UNIT_SECTION;
1376		}
1377
1378		if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
1379			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
1380			return -EINVAL;
1381		}
1382#else
1383		f2fs_err(sbi, "Zoned block device support is not enabled");
1384		return -EINVAL;
1385#endif
1386	}
1387
1388#ifdef CONFIG_F2FS_FS_COMPRESSION
1389	if (f2fs_test_compress_extension(sbi)) {
1390		f2fs_err(sbi, "invalid compress or nocompress extension");
1391		return -EINVAL;
1392	}
1393#endif
1394
1395	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
1396		f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
1397			 F2FS_IO_SIZE_KB(sbi));
1398		return -EINVAL;
1399	}
1400
1401	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
1402		int min_size, max_size;
1403
1404		if (!f2fs_sb_has_extra_attr(sbi) ||
1405			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
1406			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
1407			return -EINVAL;
1408		}
1409		if (!test_opt(sbi, INLINE_XATTR)) {
1410			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
1411			return -EINVAL;
1412		}
1413
1414		min_size = MIN_INLINE_XATTR_SIZE;
1415		max_size = MAX_INLINE_XATTR_SIZE;
1416
1417		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
1418				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
1419			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
1420				 min_size, max_size);
1421			return -EINVAL;
1422		}
1423	}
1424
1425	if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
1426		f2fs_err(sbi, "LFS is not compatible with ATGC");
1427		return -EINVAL;
1428	}
1429
1430	if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) {
1431		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
1432		return -EINVAL;
1433	}
1434
1435	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
1436		f2fs_err(sbi, "Allow to mount readonly mode only");
1437		return -EROFS;
1438	}
1439	return 0;
1440}
1441
1442static struct inode *f2fs_alloc_inode(struct super_block *sb)
1443{
1444	struct f2fs_inode_info *fi;
1445
1446	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
1447		return NULL;
1448
1449	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
1450	if (!fi)
1451		return NULL;
1452
1453	init_once((void *) fi);
1454
1455	/* Initialize f2fs-specific inode info */
1456	atomic_set(&fi->dirty_pages, 0);
1457	atomic_set(&fi->i_compr_blocks, 0);
1458	init_f2fs_rwsem(&fi->i_sem);
1459	spin_lock_init(&fi->i_size_lock);
1460	INIT_LIST_HEAD(&fi->dirty_list);
1461	INIT_LIST_HEAD(&fi->gdirty_list);
1462	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
1463	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
1464	init_f2fs_rwsem(&fi->i_xattr_sem);
1465
1466	/* Will be used by directory only */
1467	fi->i_dir_level = F2FS_SB(sb)->dir_level;
1468
1469	return &fi->vfs_inode;
1470}
1471
1472static int f2fs_drop_inode(struct inode *inode)
1473{
1474	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1475	int ret;
1476
1477	/*
1478	 * during filesystem shutdown, if checkpoint is disabled,
1479	 * drop useless meta/node dirty pages.
1480	 */
1481	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1482		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1483			inode->i_ino == F2FS_META_INO(sbi)) {
1484			trace_f2fs_drop_inode(inode, 1);
1485			return 1;
1486		}
1487	}
1488
1489	/*
1490	 * This is to avoid a deadlock condition like below.
1491	 * writeback_single_inode(inode)
1492	 *  - f2fs_write_data_page
1493	 *    - f2fs_gc -> iput -> evict
1494	 *       - inode_wait_for_writeback(inode)
1495	 */
1496	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
1497		if (!inode->i_nlink && !is_bad_inode(inode)) {
1498			/* to avoid evict_inode call simultaneously */
1499			atomic_inc(&inode->i_count);
1500			spin_unlock(&inode->i_lock);
1501
1502			/* should remain fi->extent_tree for writepage */
1503			f2fs_destroy_extent_node(inode);
1504
1505			sb_start_intwrite(inode->i_sb);
1506			f2fs_i_size_write(inode, 0);
1507
1508			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
1509					inode, NULL, 0, DATA);
1510			truncate_inode_pages_final(inode->i_mapping);
1511
1512			if (F2FS_HAS_BLOCKS(inode))
1513				f2fs_truncate(inode);
1514
1515			sb_end_intwrite(inode->i_sb);
1516
1517			spin_lock(&inode->i_lock);
1518			atomic_dec(&inode->i_count);
1519		}
1520		trace_f2fs_drop_inode(inode, 0);
1521		return 0;
1522	}
1523	ret = generic_drop_inode(inode);
1524	if (!ret)
1525		ret = fscrypt_drop_inode(inode);
1526	trace_f2fs_drop_inode(inode, ret);
1527	return ret;
1528}
1529
1530int f2fs_inode_dirtied(struct inode *inode, bool sync)
1531{
1532	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1533	int ret = 0;
1534
1535	spin_lock(&sbi->inode_lock[DIRTY_META]);
1536	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1537		ret = 1;
1538	} else {
1539		set_inode_flag(inode, FI_DIRTY_INODE);
1540		stat_inc_dirty_inode(sbi, DIRTY_META);
1541	}
1542	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
1543		list_add_tail(&F2FS_I(inode)->gdirty_list,
1544				&sbi->inode_list[DIRTY_META]);
1545		inc_page_count(sbi, F2FS_DIRTY_IMETA);
1546	}
1547	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1548	return ret;
1549}
1550
1551void f2fs_inode_synced(struct inode *inode)
1552{
1553	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1554
1555	spin_lock(&sbi->inode_lock[DIRTY_META]);
1556	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1557		spin_unlock(&sbi->inode_lock[DIRTY_META]);
1558		return;
1559	}
1560	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
1561		list_del_init(&F2FS_I(inode)->gdirty_list);
1562		dec_page_count(sbi, F2FS_DIRTY_IMETA);
1563	}
1564	clear_inode_flag(inode, FI_DIRTY_INODE);
1565	clear_inode_flag(inode, FI_AUTO_RECOVER);
1566	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1567	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1568}
1569
1570/*
1571 * f2fs_dirty_inode() is called from __mark_inode_dirty()
1572 *
1573 * We should call set_dirty_inode to write the dirty inode through write_inode.
1574 */
1575static void f2fs_dirty_inode(struct inode *inode, int flags)
1576{
1577	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1578
1579	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1580			inode->i_ino == F2FS_META_INO(sbi))
1581		return;
1582
1583	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1584		clear_inode_flag(inode, FI_AUTO_RECOVER);
1585
1586	f2fs_inode_dirtied(inode, false);
1587}
1588
1589static void f2fs_free_inode(struct inode *inode)
1590{
1591	fscrypt_free_inode(inode);
1592	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
1593}
1594
1595static void destroy_percpu_info(struct f2fs_sb_info *sbi)
1596{
1597	percpu_counter_destroy(&sbi->total_valid_inode_count);
1598	percpu_counter_destroy(&sbi->rf_node_block_count);
1599	percpu_counter_destroy(&sbi->alloc_valid_block_count);
1600}
1601
1602static void destroy_device_list(struct f2fs_sb_info *sbi)
1603{
1604	int i;
1605
1606	for (i = 0; i < sbi->s_ndevs; i++) {
1607		if (i > 0)
1608			bdev_release(FDEV(i).bdev_handle);
1609#ifdef CONFIG_BLK_DEV_ZONED
1610		kvfree(FDEV(i).blkz_seq);
1611#endif
1612	}
1613	kvfree(sbi->devs);
1614}
1615
1616static void f2fs_put_super(struct super_block *sb)
1617{
1618	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1619	int i;
1620	int err = 0;
1621	bool done;
1622
1623	/* unregister procfs/sysfs entries in advance to avoid race case */
1624	f2fs_unregister_sysfs(sbi);
1625
1626	f2fs_quota_off_umount(sb);
1627
1628	/* prevent remaining shrinker jobs */
1629	mutex_lock(&sbi->umount_mutex);
1630
1631	/*
 1632	 * Flush all issued checkpoints and stop the checkpoint issue thread.
 1633	 * After that, all checkpoints should be done by each process context.
1634	 */
1635	f2fs_stop_ckpt_thread(sbi);
1636
1637	/*
 1638	 * We don't need to do a checkpoint when the superblock is clean.
 1639	 * But if the previous checkpoint was not done by umount, we need to
 1640	 * do a clean checkpoint again.
1641	 */
1642	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
1643			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
1644		struct cp_control cpc = {
1645			.reason = CP_UMOUNT,
1646		};
1647		stat_inc_cp_call_count(sbi, TOTAL_CALL);
1648		err = f2fs_write_checkpoint(sbi, &cpc);
1649	}
1650
1651	/* be sure to wait for any on-going discard commands */
1652	done = f2fs_issue_discard_timeout(sbi);
1653	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
1654		struct cp_control cpc = {
1655			.reason = CP_UMOUNT | CP_TRIMMED,
1656		};
1657		stat_inc_cp_call_count(sbi, TOTAL_CALL);
1658		err = f2fs_write_checkpoint(sbi, &cpc);
1659	}
1660
1661	/*
 1662	 * Normally the superblock is clean, so we need to release this.
 1663	 * In addition, EIO will skip the checkpoint, so we need this as well.
1664	 */
1665	f2fs_release_ino_entry(sbi, true);
1666
1667	f2fs_leave_shrinker(sbi);
1668	mutex_unlock(&sbi->umount_mutex);
1669
 1670	/* in our cp_error case, we can wait for any writeback page */
1671	f2fs_flush_merged_writes(sbi);
1672
1673	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1674
1675	if (err || f2fs_cp_error(sbi)) {
1676		truncate_inode_pages_final(NODE_MAPPING(sbi));
1677		truncate_inode_pages_final(META_MAPPING(sbi));
1678	}
1679
1680	for (i = 0; i < NR_COUNT_TYPE; i++) {
1681		if (!get_pages(sbi, i))
1682			continue;
1683		f2fs_err(sbi, "detect filesystem reference count leak during "
1684			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
1685		f2fs_bug_on(sbi, 1);
1686	}
1687
1688	f2fs_bug_on(sbi, sbi->fsync_node_num);
1689
1690	f2fs_destroy_compress_inode(sbi);
1691
1692	iput(sbi->node_inode);
1693	sbi->node_inode = NULL;
1694
1695	iput(sbi->meta_inode);
1696	sbi->meta_inode = NULL;
1697
1698	/*
 1699	 * iput() can update stat information if f2fs_write_checkpoint()
 1700	 * above failed with an error.
1701	 */
1702	f2fs_destroy_stats(sbi);
1703
1704	/* destroy f2fs internal modules */
1705	f2fs_destroy_node_manager(sbi);
1706	f2fs_destroy_segment_manager(sbi);
1707
1708	/* flush s_error_work before sbi destroy */
1709	flush_work(&sbi->s_error_work);
1710
1711	f2fs_destroy_post_read_wq(sbi);
1712
1713	kvfree(sbi->ckpt);
1714
1715	if (sbi->s_chksum_driver)
1716		crypto_free_shash(sbi->s_chksum_driver);
1717	kfree(sbi->raw_super);
1718
1719	f2fs_destroy_page_array_cache(sbi);
1720	f2fs_destroy_xattr_caches(sbi);
1721	mempool_destroy(sbi->write_io_dummy);
1722#ifdef CONFIG_QUOTA
1723	for (i = 0; i < MAXQUOTAS; i++)
1724		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
1725#endif
1726	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
1727	destroy_percpu_info(sbi);
1728	f2fs_destroy_iostat(sbi);
1729	for (i = 0; i < NR_PAGE_TYPE; i++)
1730		kvfree(sbi->write_io[i]);
1731#if IS_ENABLED(CONFIG_UNICODE)
1732	utf8_unload(sb->s_encoding);
1733#endif
1734}
1735
1736int f2fs_sync_fs(struct super_block *sb, int sync)
1737{
1738	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1739	int err = 0;
1740
1741	if (unlikely(f2fs_cp_error(sbi)))
1742		return 0;
1743	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
1744		return 0;
1745
1746	trace_f2fs_sync_fs(sb, sync);
1747
1748	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1749		return -EAGAIN;
1750
1751	if (sync) {
1752		stat_inc_cp_call_count(sbi, TOTAL_CALL);
1753		err = f2fs_issue_checkpoint(sbi);
1754	}
1755
1756	return err;
1757}
1758
1759static int f2fs_freeze(struct super_block *sb)
1760{
1761	if (f2fs_readonly(sb))
1762		return 0;
1763
1764	/* IO error happened before */
1765	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
1766		return -EIO;
1767
1768	/* must be clean, since sync_filesystem() was already called */
1769	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
1770		return -EINVAL;
1771
1772	/* Let's flush checkpoints and stop the thread. */
1773	f2fs_flush_ckpt_thread(F2FS_SB(sb));
1774
1775	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
1776	set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
1777	return 0;
1778}
1779
1780static int f2fs_unfreeze(struct super_block *sb)
1781{
1782	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
1783	return 0;
1784}
1785
1786#ifdef CONFIG_QUOTA
1787static int f2fs_statfs_project(struct super_block *sb,
1788				kprojid_t projid, struct kstatfs *buf)
1789{
1790	struct kqid qid;
1791	struct dquot *dquot;
1792	u64 limit;
1793	u64 curblock;
1794
1795	qid = make_kqid_projid(projid);
1796	dquot = dqget(sb, qid);
1797	if (IS_ERR(dquot))
1798		return PTR_ERR(dquot);
1799	spin_lock(&dquot->dq_dqb_lock);
1800
1801	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
1802					dquot->dq_dqb.dqb_bhardlimit);
1803	if (limit)
1804		limit >>= sb->s_blocksize_bits;
1805
1806	if (limit && buf->f_blocks > limit) {
1807		curblock = (dquot->dq_dqb.dqb_curspace +
1808			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
1809		buf->f_blocks = limit;
1810		buf->f_bfree = buf->f_bavail =
1811			(buf->f_blocks > curblock) ?
1812			 (buf->f_blocks - curblock) : 0;
1813	}
1814
1815	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
1816					dquot->dq_dqb.dqb_ihardlimit);
1817
1818	if (limit && buf->f_files > limit) {
1819		buf->f_files = limit;
1820		buf->f_ffree =
1821			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
1822			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
1823	}
1824
1825	spin_unlock(&dquot->dq_dqb_lock);
1826	dqput(dquot);
1827	return 0;
1828}
1829#endif
1830
1831static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
1832{
1833	struct super_block *sb = dentry->d_sb;
1834	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1835	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1836	block_t total_count, user_block_count, start_count;
1837	u64 avail_node_count;
1838	unsigned int total_valid_node_count;
1839
1840	total_count = le64_to_cpu(sbi->raw_super->block_count);
1841	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
1842	buf->f_type = F2FS_SUPER_MAGIC;
1843	buf->f_bsize = sbi->blocksize;
1844
1845	buf->f_blocks = total_count - start_count;
1846
1847	spin_lock(&sbi->stat_lock);
1848
1849	user_block_count = sbi->user_block_count;
1850	total_valid_node_count = valid_node_count(sbi);
1851	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
1852	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1853						sbi->current_reserved_blocks;
1854
1855	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
1856		buf->f_bfree = 0;
1857	else
1858		buf->f_bfree -= sbi->unusable_block_count;
1859	spin_unlock(&sbi->stat_lock);
1860
1861	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
1862		buf->f_bavail = buf->f_bfree -
1863				F2FS_OPTION(sbi).root_reserved_blocks;
1864	else
1865		buf->f_bavail = 0;
1866
1867	if (avail_node_count > user_block_count) {
1868		buf->f_files = user_block_count;
1869		buf->f_ffree = buf->f_bavail;
1870	} else {
1871		buf->f_files = avail_node_count;
1872		buf->f_ffree = min(avail_node_count - total_valid_node_count,
1873					buf->f_bavail);
1874	}
1875
1876	buf->f_namelen = F2FS_NAME_LEN;
1877	buf->f_fsid    = u64_to_fsid(id);
1878
1879#ifdef CONFIG_QUOTA
1880	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
1881			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
1882		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
1883	}
1884#endif
1885	return 0;
1886}
1887
1888static inline void f2fs_show_quota_options(struct seq_file *seq,
1889					   struct super_block *sb)
1890{
1891#ifdef CONFIG_QUOTA
1892	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1893
1894	if (F2FS_OPTION(sbi).s_jquota_fmt) {
1895		char *fmtname = "";
1896
1897		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
1898		case QFMT_VFS_OLD:
1899			fmtname = "vfsold";
1900			break;
1901		case QFMT_VFS_V0:
1902			fmtname = "vfsv0";
1903			break;
1904		case QFMT_VFS_V1:
1905			fmtname = "vfsv1";
1906			break;
1907		}
1908		seq_printf(seq, ",jqfmt=%s", fmtname);
1909	}
1910
1911	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
1912		seq_show_option(seq, "usrjquota",
1913			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
1914
1915	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
1916		seq_show_option(seq, "grpjquota",
1917			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
1918
1919	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
1920		seq_show_option(seq, "prjjquota",
1921			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
1922#endif
1923}
1924
1925#ifdef CONFIG_F2FS_FS_COMPRESSION
1926static inline void f2fs_show_compress_options(struct seq_file *seq,
1927							struct super_block *sb)
1928{
1929	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1930	char *algtype = "";
1931	int i;
1932
1933	if (!f2fs_sb_has_compression(sbi))
1934		return;
1935
1936	switch (F2FS_OPTION(sbi).compress_algorithm) {
1937	case COMPRESS_LZO:
1938		algtype = "lzo";
1939		break;
1940	case COMPRESS_LZ4:
1941		algtype = "lz4";
1942		break;
1943	case COMPRESS_ZSTD:
1944		algtype = "zstd";
1945		break;
1946	case COMPRESS_LZORLE:
1947		algtype = "lzo-rle";
1948		break;
1949	}
1950	seq_printf(seq, ",compress_algorithm=%s", algtype);
1951
1952	if (F2FS_OPTION(sbi).compress_level)
1953		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
1954
1955	seq_printf(seq, ",compress_log_size=%u",
1956			F2FS_OPTION(sbi).compress_log_size);
1957
1958	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
1959		seq_printf(seq, ",compress_extension=%s",
1960			F2FS_OPTION(sbi).extensions[i]);
1961	}
1962
1963	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
1964		seq_printf(seq, ",nocompress_extension=%s",
1965			F2FS_OPTION(sbi).noextensions[i]);
1966	}
1967
1968	if (F2FS_OPTION(sbi).compress_chksum)
1969		seq_puts(seq, ",compress_chksum");
1970
1971	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
1972		seq_printf(seq, ",compress_mode=%s", "fs");
1973	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
1974		seq_printf(seq, ",compress_mode=%s", "user");
1975
1976	if (test_opt(sbi, COMPRESS_CACHE))
1977		seq_puts(seq, ",compress_cache");
1978}
1979#endif
1980
1981static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1982{
1983	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
1984
1985	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
1986		seq_printf(seq, ",background_gc=%s", "sync");
1987	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
1988		seq_printf(seq, ",background_gc=%s", "on");
1989	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
1990		seq_printf(seq, ",background_gc=%s", "off");
1991
1992	if (test_opt(sbi, GC_MERGE))
1993		seq_puts(seq, ",gc_merge");
1994	else
1995		seq_puts(seq, ",nogc_merge");
1996
1997	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
1998		seq_puts(seq, ",disable_roll_forward");
1999	if (test_opt(sbi, NORECOVERY))
2000		seq_puts(seq, ",norecovery");
2001	if (test_opt(sbi, DISCARD)) {
2002		seq_puts(seq, ",discard");
2003		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
2004			seq_printf(seq, ",discard_unit=%s", "block");
2005		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2006			seq_printf(seq, ",discard_unit=%s", "segment");
2007		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2008			seq_printf(seq, ",discard_unit=%s", "section");
2009	} else {
2010		seq_puts(seq, ",nodiscard");
2011	}
2012	if (test_opt(sbi, NOHEAP))
2013		seq_puts(seq, ",no_heap");
2014	else
2015		seq_puts(seq, ",heap");
2016#ifdef CONFIG_F2FS_FS_XATTR
2017	if (test_opt(sbi, XATTR_USER))
2018		seq_puts(seq, ",user_xattr");
2019	else
2020		seq_puts(seq, ",nouser_xattr");
2021	if (test_opt(sbi, INLINE_XATTR))
2022		seq_puts(seq, ",inline_xattr");
2023	else
2024		seq_puts(seq, ",noinline_xattr");
2025	if (test_opt(sbi, INLINE_XATTR_SIZE))
2026		seq_printf(seq, ",inline_xattr_size=%u",
2027					F2FS_OPTION(sbi).inline_xattr_size);
2028#endif
2029#ifdef CONFIG_F2FS_FS_POSIX_ACL
2030	if (test_opt(sbi, POSIX_ACL))
2031		seq_puts(seq, ",acl");
2032	else
2033		seq_puts(seq, ",noacl");
2034#endif
2035	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
2036		seq_puts(seq, ",disable_ext_identify");
2037	if (test_opt(sbi, INLINE_DATA))
2038		seq_puts(seq, ",inline_data");
2039	else
2040		seq_puts(seq, ",noinline_data");
2041	if (test_opt(sbi, INLINE_DENTRY))
2042		seq_puts(seq, ",inline_dentry");
2043	else
2044		seq_puts(seq, ",noinline_dentry");
2045	if (test_opt(sbi, FLUSH_MERGE))
2046		seq_puts(seq, ",flush_merge");
2047	else
2048		seq_puts(seq, ",noflush_merge");
2049	if (test_opt(sbi, NOBARRIER))
2050		seq_puts(seq, ",nobarrier");
2051	else
2052		seq_puts(seq, ",barrier");
2053	if (test_opt(sbi, FASTBOOT))
2054		seq_puts(seq, ",fastboot");
2055	if (test_opt(sbi, READ_EXTENT_CACHE))
2056		seq_puts(seq, ",extent_cache");
2057	else
2058		seq_puts(seq, ",noextent_cache");
2059	if (test_opt(sbi, AGE_EXTENT_CACHE))
2060		seq_puts(seq, ",age_extent_cache");
2061	if (test_opt(sbi, DATA_FLUSH))
2062		seq_puts(seq, ",data_flush");
2063
2064	seq_puts(seq, ",mode=");
2065	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
2066		seq_puts(seq, "adaptive");
2067	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
2068		seq_puts(seq, "lfs");
2069	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
2070		seq_puts(seq, "fragment:segment");
2071	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2072		seq_puts(seq, "fragment:block");
2073	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
2074	if (test_opt(sbi, RESERVE_ROOT))
2075		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
2076				F2FS_OPTION(sbi).root_reserved_blocks,
2077				from_kuid_munged(&init_user_ns,
2078					F2FS_OPTION(sbi).s_resuid),
2079				from_kgid_munged(&init_user_ns,
2080					F2FS_OPTION(sbi).s_resgid));
2081	if (F2FS_IO_SIZE_BITS(sbi))
2082		seq_printf(seq, ",io_bits=%u",
2083				F2FS_OPTION(sbi).write_io_size_bits);
2084#ifdef CONFIG_F2FS_FAULT_INJECTION
2085	if (test_opt(sbi, FAULT_INJECTION)) {
2086		seq_printf(seq, ",fault_injection=%u",
2087				F2FS_OPTION(sbi).fault_info.inject_rate);
2088		seq_printf(seq, ",fault_type=%u",
2089				F2FS_OPTION(sbi).fault_info.inject_type);
2090	}
2091#endif
2092#ifdef CONFIG_QUOTA
2093	if (test_opt(sbi, QUOTA))
2094		seq_puts(seq, ",quota");
2095	if (test_opt(sbi, USRQUOTA))
2096		seq_puts(seq, ",usrquota");
2097	if (test_opt(sbi, GRPQUOTA))
2098		seq_puts(seq, ",grpquota");
2099	if (test_opt(sbi, PRJQUOTA))
2100		seq_puts(seq, ",prjquota");
2101#endif
2102	f2fs_show_quota_options(seq, sbi->sb);
2103
2104	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
2105
2106	if (sbi->sb->s_flags & SB_INLINECRYPT)
2107		seq_puts(seq, ",inlinecrypt");
2108
2109	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
2110		seq_printf(seq, ",alloc_mode=%s", "default");
2111	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2112		seq_printf(seq, ",alloc_mode=%s", "reuse");
2113
2114	if (test_opt(sbi, DISABLE_CHECKPOINT))
2115		seq_printf(seq, ",checkpoint=disable:%u",
2116				F2FS_OPTION(sbi).unusable_cap);
2117	if (test_opt(sbi, MERGE_CHECKPOINT))
2118		seq_puts(seq, ",checkpoint_merge");
2119	else
2120		seq_puts(seq, ",nocheckpoint_merge");
2121	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
2122		seq_printf(seq, ",fsync_mode=%s", "posix");
2123	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
2124		seq_printf(seq, ",fsync_mode=%s", "strict");
2125	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
2126		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
2127
2128#ifdef CONFIG_F2FS_FS_COMPRESSION
2129	f2fs_show_compress_options(seq, sbi->sb);
2130#endif
2131
2132	if (test_opt(sbi, ATGC))
2133		seq_puts(seq, ",atgc");
2134
2135	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
2136		seq_printf(seq, ",memory=%s", "normal");
2137	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
2138		seq_printf(seq, ",memory=%s", "low");
2139
2140	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
2141		seq_printf(seq, ",errors=%s", "remount-ro");
2142	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
2143		seq_printf(seq, ",errors=%s", "continue");
2144	else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
2145		seq_printf(seq, ",errors=%s", "panic");
2146
2147	return 0;
2148}
2149
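/*
 * Reset mount options to their defaults.  On an initial mount this also
 * re-enables the read extent cache, probes the device for discard support
 * and picks the discard unit; on remount those settings are left as-is.
 */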
2150static void default_options(struct f2fs_sb_info *sbi, bool remount)
2151{
2152	/* init some FS parameters */
2153	if (!remount) {
2154		set_opt(sbi, READ_EXTENT_CACHE);
2155		clear_opt(sbi, DISABLE_CHECKPOINT);
2156
2157		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
2158			set_opt(sbi, DISCARD);
2159
2160		if (f2fs_sb_has_blkzoned(sbi))
2161			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
2162		else
2163			F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
2164	}
2165
2166	if (f2fs_sb_has_readonly(sbi))
2167		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
2168	else
2169		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
2170
2171	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
2172	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
2173							SMALL_VOLUME_SEGMENTS)
2174		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
2175	else
2176		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
2177	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
2178	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
2179	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
2180	if (f2fs_sb_has_compression(sbi)) {
2181		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
2182		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
2183		F2FS_OPTION(sbi).compress_ext_cnt = 0;
2184		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
2185	}
2186	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
2187	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
2188	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
2189
2190	sbi->sb->s_flags &= ~SB_INLINECRYPT;
2191
2192	set_opt(sbi, INLINE_XATTR);
2193	set_opt(sbi, INLINE_DATA);
2194	set_opt(sbi, INLINE_DENTRY);
2195	set_opt(sbi, NOHEAP);
2196	set_opt(sbi, MERGE_CHECKPOINT);
2197	F2FS_OPTION(sbi).unusable_cap = 0;
2198	sbi->sb->s_flags |= SB_LAZYTIME;
2199	if (!f2fs_is_readonly(sbi))
2200		set_opt(sbi, FLUSH_MERGE);
2201	if (f2fs_sb_has_blkzoned(sbi))
2202		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
2203	else
2204		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
2205
2206#ifdef CONFIG_F2FS_FS_XATTR
2207	set_opt(sbi, XATTR_USER);
2208#endif
2209#ifdef CONFIG_F2FS_FS_POSIX_ACL
2210	set_opt(sbi, POSIX_ACL);
2211#endif
2212
2213	f2fs_build_fault_attr(sbi, 0, 0);
2214}
2215
2216#ifdef CONFIG_QUOTA
2217static int f2fs_enable_quotas(struct super_block *sb);
2218#endif
2219
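/*
 * Enter checkpoint=disable mode: run urgent foreground GC until the number
 * of unusable blocks is acceptable (or DISABLE_TIME expires), sync the
 * filesystem, then write a CP_PAUSE checkpoint with SBI_CP_DISABLED set
 * and record the remaining unusable block count.
 */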
2220static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2221{
2222	unsigned int s_flags = sbi->sb->s_flags;
2223	struct cp_control cpc;
2224	unsigned int gc_mode = sbi->gc_mode;
2225	int err = 0;
2226	int ret;
2227	block_t unusable;
2228
2229	if (s_flags & SB_RDONLY) {
2230		f2fs_err(sbi, "checkpoint=disable on readonly fs");
2231		return -EINVAL;
2232	}
2233	sbi->sb->s_flags |= SB_ACTIVE;
2234
2235	/* check if we need more GC first */
2236	unusable = f2fs_get_unusable_blocks(sbi);
2237	if (!f2fs_disable_cp_again(sbi, unusable))
2238		goto skip_gc;
2239
2240	f2fs_update_time(sbi, DISABLE_TIME);
2241
2242	sbi->gc_mode = GC_URGENT_HIGH;
2243
2244	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2245		struct f2fs_gc_control gc_control = {
2246			.victim_segno = NULL_SEGNO,
2247			.init_gc_type = FG_GC,
2248			.should_migrate_blocks = false,
2249			.err_gc_skipped = true,
2250			.nr_free_secs = 1 };
2251
2252		f2fs_down_write(&sbi->gc_lock);
2253		stat_inc_gc_call_count(sbi, FOREGROUND);
2254		err = f2fs_gc(sbi, &gc_control);
2255		if (err == -ENODATA) {
2256			err = 0;
2257			break;
2258		}
2259		if (err && err != -EAGAIN)
2260			break;
2261	}
2262
2263	ret = sync_filesystem(sbi->sb);
2264	if (ret || err) {
2265		err = ret ? ret : err;
2266		goto restore_flag;
2267	}
2268
2269	unusable = f2fs_get_unusable_blocks(sbi);
2270	if (f2fs_disable_cp_again(sbi, unusable)) {
2271		err = -EAGAIN;
2272		goto restore_flag;
2273	}
2274
2275skip_gc:
2276	f2fs_down_write(&sbi->gc_lock);
2277	cpc.reason = CP_PAUSE;
2278	set_sbi_flag(sbi, SBI_CP_DISABLED);
2279	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2280	err = f2fs_write_checkpoint(sbi, &cpc);
2281	if (err)
2282		goto out_unlock;
2283
2284	spin_lock(&sbi->stat_lock);
2285	sbi->unusable_block_count = unusable;
2286	spin_unlock(&sbi->stat_lock);
2287
2288out_unlock:
2289	f2fs_up_write(&sbi->gc_lock);
2290restore_flag:
2291	sbi->gc_mode = gc_mode;
2292	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
2293	return err;
2294}
2295
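/*
 * Leave checkpoint=disable mode: flush dirty data, move dirty segments
 * back to prefree, clear SBI_CP_DISABLED and issue a checkpoint so the
 * on-disk state is consistent again.
 */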
2296static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2297{
2298	int retry = DEFAULT_RETRY_IO_COUNT;
2299
2300	/* we should flush all the data to maintain data consistency */
2301	do {
2302		sync_inodes_sb(sbi->sb);
2303		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2304	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
2305
2306	if (unlikely(retry < 0))
2307		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
2308
2309	f2fs_down_write(&sbi->gc_lock);
2310	f2fs_dirty_to_prefree(sbi);
2311
2312	clear_sbi_flag(sbi, SBI_CP_DISABLED);
2313	set_sbi_flag(sbi, SBI_IS_DIRTY);
2314	f2fs_up_write(&sbi->gc_lock);
2315
2316	f2fs_sync_fs(sbi->sb, 1);
2317
2318	/* Let's ensure there's no pending checkpoint anymore */
2319	f2fs_flush_ckpt_thread(sbi);
2320}
2321
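/*
 * Remount handler: save the current options, re-parse the new ones, reject
 * option changes that cannot be switched on a live filesystem (atgc,
 * extent_cache, io_bits, compress_cache, discard_unit), then start or stop
 * the GC, flush, discard and checkpoint threads to match the new RO/RW
 * state and options.  On failure, the old options and threads are restored.
 */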
2322static int f2fs_remount(struct super_block *sb, int *flags, char *data)
2323{
2324	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2325	struct f2fs_mount_info org_mount_opt;
2326	unsigned long old_sb_flags;
2327	int err;
2328	bool need_restart_gc = false, need_stop_gc = false;
2329	bool need_restart_flush = false, need_stop_flush = false;
2330	bool need_restart_discard = false, need_stop_discard = false;
2331	bool need_enable_checkpoint = false, need_disable_checkpoint = false;
2332	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
2333	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
2334	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2335	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
2336	bool no_atgc = !test_opt(sbi, ATGC);
2337	bool no_discard = !test_opt(sbi, DISCARD);
2338	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2339	bool block_unit_discard = f2fs_block_unit_discard(sbi);
2340#ifdef CONFIG_QUOTA
2341	int i, j;
2342#endif
2343
2344	/*
2345	 * Save the old mount options in case we
2346	 * need to restore them.
2347	 */
2348	org_mount_opt = sbi->mount_opt;
2349	old_sb_flags = sb->s_flags;
2350
2351#ifdef CONFIG_QUOTA
2352	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2353	for (i = 0; i < MAXQUOTAS; i++) {
2354		if (F2FS_OPTION(sbi).s_qf_names[i]) {
2355			org_mount_opt.s_qf_names[i] =
2356				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2357				GFP_KERNEL);
2358			if (!org_mount_opt.s_qf_names[i]) {
2359				for (j = 0; j < i; j++)
2360					kfree(org_mount_opt.s_qf_names[j]);
2361				return -ENOMEM;
2362			}
2363		} else {
2364			org_mount_opt.s_qf_names[i] = NULL;
2365		}
2366	}
2367#endif
2368
2369	/* recover superblocks we couldn't write due to previous RO mount */
2370	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2371		err = f2fs_commit_super(sbi, false);
2372		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2373			  err);
2374		if (!err)
2375			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2376	}
2377
2378	default_options(sbi, true);
2379
2380	/* parse mount options */
2381	err = parse_options(sb, data, true);
2382	if (err)
2383		goto restore_opts;
2384
2385	/* flush outstanding errors before changing fs state */
2386	flush_work(&sbi->s_error_work);
2387
2388	/*
2389	 * Both the previous and the new state of the filesystem are RO,
2390	 * so skip checking GC and FLUSH_MERGE conditions.
2391	 */
2392	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
2393		goto skip;
2394
2395	if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
2396		err = -EROFS;
2397		goto restore_opts;
2398	}
2399
2400#ifdef CONFIG_QUOTA
2401	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
2402		err = dquot_suspend(sb, -1);
2403		if (err < 0)
2404			goto restore_opts;
2405	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
2406		/* dquot_resume needs RW */
2407		sb->s_flags &= ~SB_RDONLY;
2408		if (sb_any_quota_suspended(sb)) {
2409			dquot_resume(sb, -1);
2410		} else if (f2fs_sb_has_quota_ino(sbi)) {
2411			err = f2fs_enable_quotas(sb);
2412			if (err)
2413				goto restore_opts;
2414		}
2415	}
2416#endif
2417	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
2418		err = -EINVAL;
2419		f2fs_warn(sbi, "LFS is not compatible with IPU");
2420		goto restore_opts;
2421	}
2422
2423	/* disallow enabling atgc dynamically */
2424	if (no_atgc == !!test_opt(sbi, ATGC)) {
2425		err = -EINVAL;
2426		f2fs_warn(sbi, "switch atgc option is not allowed");
2427		goto restore_opts;
2428	}
2429
2430	/* disallow enabling/disabling extent_cache dynamically */
2431	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
2432		err = -EINVAL;
2433		f2fs_warn(sbi, "switch extent_cache option is not allowed");
2434		goto restore_opts;
2435	}
2436	/* disallow enabling/disabling age extent_cache dynamically */
2437	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
2438		err = -EINVAL;
2439		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
2440		goto restore_opts;
2441	}
2442
2443	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
2444		err = -EINVAL;
2445		f2fs_warn(sbi, "switch io_bits option is not allowed");
2446		goto restore_opts;
2447	}
2448
2449	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2450		err = -EINVAL;
2451		f2fs_warn(sbi, "switch compress_cache option is not allowed");
2452		goto restore_opts;
2453	}
2454
2455	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2456		err = -EINVAL;
2457		f2fs_warn(sbi, "switch discard_unit option is not allowed");
2458		goto restore_opts;
2459	}
2460
2461	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2462		err = -EINVAL;
2463		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2464		goto restore_opts;
2465	}
2466
2467	/*
2468	 * We stop the GC thread if the FS is mounted as RO
2469	 * or if background_gc=off is passed as a mount
2470	 * option. Also sync the filesystem.
2471	 */
2472	if ((*flags & SB_RDONLY) ||
2473			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2474			!test_opt(sbi, GC_MERGE))) {
2475		if (sbi->gc_thread) {
2476			f2fs_stop_gc_thread(sbi);
2477			need_restart_gc = true;
2478		}
2479	} else if (!sbi->gc_thread) {
2480		err = f2fs_start_gc_thread(sbi);
2481		if (err)
2482			goto restore_opts;
2483		need_stop_gc = true;
2484	}
2485
2486	if (*flags & SB_RDONLY) {
2487		sync_inodes_sb(sb);
2488
2489		set_sbi_flag(sbi, SBI_IS_DIRTY);
2490		set_sbi_flag(sbi, SBI_IS_CLOSE);
2491		f2fs_sync_fs(sb, 1);
2492		clear_sbi_flag(sbi, SBI_IS_CLOSE);
2493	}
2494
2495	/*
2496	 * We stop the issue_flush thread if the FS is mounted as RO
2497	 * or if flush_merge is not passed as a mount option.
2498	 */
2499	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2500		clear_opt(sbi, FLUSH_MERGE);
2501		f2fs_destroy_flush_cmd_control(sbi, false);
2502		need_restart_flush = true;
2503	} else {
2504		err = f2fs_create_flush_cmd_control(sbi);
2505		if (err)
2506			goto restore_gc;
2507		need_stop_flush = true;
2508	}
2509
2510	if (no_discard == !!test_opt(sbi, DISCARD)) {
2511		if (test_opt(sbi, DISCARD)) {
2512			err = f2fs_start_discard_thread(sbi);
2513			if (err)
2514				goto restore_flush;
2515			need_stop_discard = true;
2516		} else {
2517			f2fs_stop_discard_thread(sbi);
2518			f2fs_issue_discard_timeout(sbi);
2519			need_restart_discard = true;
2520		}
2521	}
2522
2523	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2524		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2525			err = f2fs_disable_checkpoint(sbi);
2526			if (err)
2527				goto restore_discard;
2528			need_enable_checkpoint = true;
2529		} else {
2530			f2fs_enable_checkpoint(sbi);
2531			need_disable_checkpoint = true;
2532		}
2533	}
2534
2535	/*
2536	 * Place this routine at the end, since a new checkpoint would be
2537	 * triggered during remount and we need to take care of it before
2538	 * returning from remount.
2539	 */
2540	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2541			!test_opt(sbi, MERGE_CHECKPOINT)) {
2542		f2fs_stop_ckpt_thread(sbi);
2543	} else {
2544		/* Flush the previous checkpoint, if it exists. */
2545		f2fs_flush_ckpt_thread(sbi);
2546
2547		err = f2fs_start_ckpt_thread(sbi);
2548		if (err) {
2549			f2fs_err(sbi,
2550			    "Failed to start F2FS issue_checkpoint_thread (%d)",
2551			    err);
2552			goto restore_checkpoint;
2553		}
2554	}
2555
2556skip:
2557#ifdef CONFIG_QUOTA
2558	/* Release old quota file names */
2559	for (i = 0; i < MAXQUOTAS; i++)
2560		kfree(org_mount_opt.s_qf_names[i]);
2561#endif
2562	/* Update the POSIXACL Flag */
2563	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2564		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2565
2566	limit_reserve_root(sbi);
2567	adjust_unusable_cap_perc(sbi);
2568	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
2569	return 0;
2570restore_checkpoint:
2571	if (need_enable_checkpoint) {
2572		f2fs_enable_checkpoint(sbi);
2573	} else if (need_disable_checkpoint) {
2574		if (f2fs_disable_checkpoint(sbi))
2575			f2fs_warn(sbi, "checkpoint has not been disabled");
2576	}
2577restore_discard:
2578	if (need_restart_discard) {
2579		if (f2fs_start_discard_thread(sbi))
2580			f2fs_warn(sbi, "discard has been stopped");
2581	} else if (need_stop_discard) {
2582		f2fs_stop_discard_thread(sbi);
2583	}
2584restore_flush:
2585	if (need_restart_flush) {
2586		if (f2fs_create_flush_cmd_control(sbi))
2587			f2fs_warn(sbi, "background flush thread has stopped");
2588	} else if (need_stop_flush) {
2589		clear_opt(sbi, FLUSH_MERGE);
2590		f2fs_destroy_flush_cmd_control(sbi, false);
2591	}
2592restore_gc:
2593	if (need_restart_gc) {
2594		if (f2fs_start_gc_thread(sbi))
2595			f2fs_warn(sbi, "background gc thread has stopped");
2596	} else if (need_stop_gc) {
2597		f2fs_stop_gc_thread(sbi);
2598	}
2599restore_opts:
2600#ifdef CONFIG_QUOTA
2601	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2602	for (i = 0; i < MAXQUOTAS; i++) {
2603		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2604		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2605	}
2606#endif
2607	sbi->mount_opt = org_mount_opt;
2608	sb->s_flags = old_sb_flags;
2609	return err;
2610}
2611
2612#ifdef CONFIG_QUOTA
2613static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
2614{
2615	/* need to recover orphan inodes */
2616	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
2617		return true;
2618	/* need to recover data */
2619	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
2620		return false;
2621	if (test_opt(sbi, NORECOVERY))
2622		return false;
2623	return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
2624}
2625
2626static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
2627{
2628	bool readonly = f2fs_readonly(sbi->sb);
2629
2630	if (!f2fs_need_recovery(sbi))
2631		return false;
2632
2633	/* no need to check f2fs_sb_has_readonly() here */
2634	if (f2fs_hw_is_readonly(sbi))
2635		return false;
2636
2637	if (readonly) {
2638		sbi->sb->s_flags &= ~SB_RDONLY;
2639		set_sbi_flag(sbi, SBI_IS_WRITABLE);
2640	}
2641
2642	/*
2643	 * Turn on quotas which were not enabled for read-only mounts if the
2644	 * filesystem has the quota feature, so that they are updated correctly.
2645	 */
2646	return f2fs_enable_quota_files(sbi, readonly);
2647}
2648
2649static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
2650						bool quota_enabled)
2651{
2652	if (quota_enabled)
2653		f2fs_quota_off_umount(sbi->sb);
2654
2655	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
2656		clear_sbi_flag(sbi, SBI_IS_WRITABLE);
2657		sbi->sb->s_flags |= SB_RDONLY;
2658	}
2659}
2660
2661/* Read data from quotafile */
2662static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
2663			       size_t len, loff_t off)
2664{
2665	struct inode *inode = sb_dqopt(sb)->files[type];
2666	struct address_space *mapping = inode->i_mapping;
2667	block_t blkidx = F2FS_BYTES_TO_BLK(off);
2668	int offset = off & (sb->s_blocksize - 1);
2669	int tocopy;
2670	size_t toread;
2671	loff_t i_size = i_size_read(inode);
2672	struct page *page;
2673
2674	if (off > i_size)
2675		return 0;
2676
2677	if (off + len > i_size)
2678		len = i_size - off;
2679	toread = len;
2680	while (toread > 0) {
2681		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
2682repeat:
2683		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
2684		if (IS_ERR(page)) {
2685			if (PTR_ERR(page) == -ENOMEM) {
2686				memalloc_retry_wait(GFP_NOFS);
2687				goto repeat;
2688			}
2689			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2690			return PTR_ERR(page);
2691		}
2692
2693		lock_page(page);
2694
2695		if (unlikely(page->mapping != mapping)) {
2696			f2fs_put_page(page, 1);
2697			goto repeat;
2698		}
2699		if (unlikely(!PageUptodate(page))) {
2700			f2fs_put_page(page, 1);
2701			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2702			return -EIO;
2703		}
2704
2705		memcpy_from_page(data, page, offset, tocopy);
2706		f2fs_put_page(page, 1);
2707
2708		offset = 0;
2709		toread -= tocopy;
2710		data += tocopy;
2711		blkidx++;
2712	}
2713	return len;
2714}
2715
2716/* Write to quotafile */
2717static ssize_t f2fs_quota_write(struct super_block *sb, int type,
2718				const char *data, size_t len, loff_t off)
2719{
2720	struct inode *inode = sb_dqopt(sb)->files[type];
2721	struct address_space *mapping = inode->i_mapping;
2722	const struct address_space_operations *a_ops = mapping->a_ops;
2723	int offset = off & (sb->s_blocksize - 1);
2724	size_t towrite = len;
2725	struct page *page;
2726	void *fsdata = NULL;
2727	int err = 0;
2728	int tocopy;
2729
2730	while (towrite > 0) {
2731		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
2732								towrite);
2733retry:
2734		err = a_ops->write_begin(NULL, mapping, off, tocopy,
2735							&page, &fsdata);
2736		if (unlikely(err)) {
2737			if (err == -ENOMEM) {
2738				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2739				goto retry;
2740			}
2741			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2742			break;
2743		}
2744
2745		memcpy_to_page(page, offset, data, tocopy);
2746
2747		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
2748						page, fsdata);
2749		offset = 0;
2750		towrite -= tocopy;
2751		off += tocopy;
2752		data += tocopy;
2753		cond_resched();
2754	}
2755
2756	if (len == towrite)
2757		return err;
2758	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2759	f2fs_mark_inode_dirty_sync(inode, false);
2760	return len - towrite;
2761}
2762
2763int f2fs_dquot_initialize(struct inode *inode)
2764{
2765	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
2766		return -ESRCH;
2767
2768	return dquot_initialize(inode);
2769}
2770
2771static struct dquot **f2fs_get_dquots(struct inode *inode)
2772{
2773	return F2FS_I(inode)->i_dquot;
2774}
2775
2776static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2777{
2778	return &F2FS_I(inode)->i_reserved_quota;
2779}
2780
2781static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2782{
2783	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2784		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2785		return 0;
2786	}
2787
2788	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2789					F2FS_OPTION(sbi).s_jquota_fmt, type);
2790}
2791
2792int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2793{
2794	int enabled = 0;
2795	int i, err;
2796
2797	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
2798		err = f2fs_enable_quotas(sbi->sb);
2799		if (err) {
2800			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
2801			return 0;
2802		}
2803		return 1;
2804	}
2805
2806	for (i = 0; i < MAXQUOTAS; i++) {
2807		if (F2FS_OPTION(sbi).s_qf_names[i]) {
2808			err = f2fs_quota_on_mount(sbi, i);
2809			if (!err) {
2810				enabled = 1;
2811				continue;
2812			}
2813			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2814				 err, i);
2815		}
2816	}
2817	return enabled;
2818}
2819
2820static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2821			     unsigned int flags)
2822{
2823	struct inode *qf_inode;
2824	unsigned long qf_inum;
2825	unsigned long qf_flag = F2FS_QUOTA_DEFAULT_FL;
2826	int err;
2827
2828	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
2829
2830	qf_inum = f2fs_qf_ino(sb, type);
2831	if (!qf_inum)
2832		return -EPERM;
2833
2834	qf_inode = f2fs_iget(sb, qf_inum);
2835	if (IS_ERR(qf_inode)) {
2836		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
2837		return PTR_ERR(qf_inode);
2838	}
2839
2840	/* Don't account quota for quota files to avoid recursion */
2841	inode_lock(qf_inode);
2842	qf_inode->i_flags |= S_NOQUOTA;
2843
2844	if ((F2FS_I(qf_inode)->i_flags & qf_flag) != qf_flag) {
2845		F2FS_I(qf_inode)->i_flags |= qf_flag;
2846		f2fs_set_inode_flags(qf_inode);
2847	}
2848	inode_unlock(qf_inode);
2849
2850	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
2851	iput(qf_inode);
2852	return err;
2853}
2854
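/*
 * Enable quota tracking on the reserved quota inodes (quota_ino feature).
 * Limits enforcement is only turned on for the quota types requested via
 * mount options; on failure, all previously enabled types are turned off
 * again and SBI_QUOTA_NEED_REPAIR is set.
 */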
2855static int f2fs_enable_quotas(struct super_block *sb)
2856{
2857	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2858	int type, err = 0;
2859	unsigned long qf_inum;
2860	bool quota_mopt[MAXQUOTAS] = {
2861		test_opt(sbi, USRQUOTA),
2862		test_opt(sbi, GRPQUOTA),
2863		test_opt(sbi, PRJQUOTA),
2864	};
2865
2866	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2867		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2868		return 0;
2869	}
2870
2871	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2872
2873	for (type = 0; type < MAXQUOTAS; type++) {
2874		qf_inum = f2fs_qf_ino(sb, type);
2875		if (qf_inum) {
2876			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2877				DQUOT_USAGE_ENABLED |
2878				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2879			if (err) {
2880				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2881					 type, err);
2882				for (type--; type >= 0; type--)
2883					dquot_quota_off(sb, type);
2884				set_sbi_flag(F2FS_SB(sb),
2885						SBI_QUOTA_NEED_REPAIR);
2886				return err;
2887			}
2888		}
2889	}
2890	return 0;
2891}
2892
2893static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
2894{
2895	struct quota_info *dqopt = sb_dqopt(sbi->sb);
2896	struct address_space *mapping = dqopt->files[type]->i_mapping;
2897	int ret = 0;
2898
2899	ret = dquot_writeback_dquots(sbi->sb, type);
2900	if (ret)
2901		goto out;
2902
2903	ret = filemap_fdatawrite(mapping);
2904	if (ret)
2905		goto out;
2906
2907	/* if we are using journalled quota */
2908	if (is_journalled_quota(sbi))
2909		goto out;
2910
2911	ret = filemap_fdatawait(mapping);
2912
2913	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
2914out:
2915	if (ret)
2916		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2917	return ret;
2918}
2919
2920int f2fs_quota_sync(struct super_block *sb, int type)
2921{
2922	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2923	struct quota_info *dqopt = sb_dqopt(sb);
2924	int cnt;
2925	int ret = 0;
2926
2927	/*
2928	 * Now that everything is written we can discard the pagecache so
2929	 * that userspace sees the changes.
2930	 */
2931	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2932
2933		if (type != -1 && cnt != type)
2934			continue;
2935
2936		if (!sb_has_quota_active(sb, cnt))
2937			continue;
2938
2939		if (!f2fs_sb_has_quota_ino(sbi))
2940			inode_lock(dqopt->files[cnt]);
2941
2942		/*
2943		 * do_quotactl
2944		 *  f2fs_quota_sync
2945		 *  f2fs_down_read(quota_sem)
2946		 *  dquot_writeback_dquots()
2947		 *  f2fs_dquot_commit
2948		 *			      block_operation
2949		 *			      f2fs_down_read(quota_sem)
2950		 */
2951		f2fs_lock_op(sbi);
2952		f2fs_down_read(&sbi->quota_sem);
2953
2954		ret = f2fs_quota_sync_file(sbi, cnt);
2955
2956		f2fs_up_read(&sbi->quota_sem);
2957		f2fs_unlock_op(sbi);
2958
2959		if (!f2fs_sb_has_quota_ino(sbi))
2960			inode_unlock(dqopt->files[cnt]);
2961
2962		if (ret)
2963			break;
2964	}
2965	return ret;
2966}
2967
2968static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2969							const struct path *path)
2970{
2971	struct inode *inode;
2972	int err;
2973
2974	/* if the quota sysfile exists, deny enabling quota with a specific file */
2975	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2976		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2977		return -EBUSY;
2978	}
2979
2980	if (path->dentry->d_sb != sb)
2981		return -EXDEV;
2982
2983	err = f2fs_quota_sync(sb, type);
2984	if (err)
2985		return err;
2986
2987	inode = d_inode(path->dentry);
2988
2989	err = filemap_fdatawrite(inode->i_mapping);
2990	if (err)
2991		return err;
2992
2993	err = filemap_fdatawait(inode->i_mapping);
2994	if (err)
2995		return err;
2996
2997	err = dquot_quota_on(sb, type, format_id, path);
2998	if (err)
2999		return err;
3000
3001	inode_lock(inode);
3002	F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL;
3003	f2fs_set_inode_flags(inode);
3004	inode_unlock(inode);
3005	f2fs_mark_inode_dirty_sync(inode, false);
3006
3007	return 0;
3008}
3009
3010static int __f2fs_quota_off(struct super_block *sb, int type)
3011{
3012	struct inode *inode = sb_dqopt(sb)->files[type];
3013	int err;
3014
3015	if (!inode || !igrab(inode))
3016		return dquot_quota_off(sb, type);
3017
3018	err = f2fs_quota_sync(sb, type);
3019	if (err)
3020		goto out_put;
3021
3022	err = dquot_quota_off(sb, type);
3023	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
3024		goto out_put;
3025
3026	inode_lock(inode);
3027	F2FS_I(inode)->i_flags &= ~F2FS_QUOTA_DEFAULT_FL;
3028	f2fs_set_inode_flags(inode);
3029	inode_unlock(inode);
3030	f2fs_mark_inode_dirty_sync(inode, false);
3031out_put:
3032	iput(inode);
3033	return err;
3034}
3035
3036static int f2fs_quota_off(struct super_block *sb, int type)
3037{
3038	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3039	int err;
3040
3041	err = __f2fs_quota_off(sb, type);
3042
3043	/*
3044	 * quotactl can shut down journalled quota, which may leave quota
3045	 * records inconsistent with fs data after subsequent updates, so tag
3046	 * the flag to let fsck be aware of it.
3047	 */
3048	if (is_journalled_quota(sbi))
3049		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3050	return err;
3051}
3052
3053void f2fs_quota_off_umount(struct super_block *sb)
3054{
3055	int type;
3056	int err;
3057
3058	for (type = 0; type < MAXQUOTAS; type++) {
3059		err = __f2fs_quota_off(sb, type);
3060		if (err) {
3061			int ret = dquot_quota_off(sb, type);
3062
3063			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
3064				 type, err, ret);
3065			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
3066		}
3067	}
3068	/*
3069	 * In case of checkpoint=disable, we must flush quota blocks.
3070	 * This can cause a NULL pointer dereference on node_inode in end_io,
3071	 * since put_super has already dropped it.
3072	 */
3073	sync_filesystem(sb);
3074}
3075
3076static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
3077{
3078	struct quota_info *dqopt = sb_dqopt(sb);
3079	int type;
3080
3081	for (type = 0; type < MAXQUOTAS; type++) {
3082		if (!dqopt->files[type])
3083			continue;
3084		f2fs_inode_synced(dqopt->files[type]);
3085	}
3086}
3087
3088static int f2fs_dquot_commit(struct dquot *dquot)
3089{
3090	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3091	int ret;
3092
3093	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
3094	ret = dquot_commit(dquot);
3095	if (ret < 0)
3096		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3097	f2fs_up_read(&sbi->quota_sem);
3098	return ret;
3099}
3100
3101static int f2fs_dquot_acquire(struct dquot *dquot)
3102{
3103	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3104	int ret;
3105
3106	f2fs_down_read(&sbi->quota_sem);
3107	ret = dquot_acquire(dquot);
3108	if (ret < 0)
3109		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3110	f2fs_up_read(&sbi->quota_sem);
3111	return ret;
3112}
3113
3114static int f2fs_dquot_release(struct dquot *dquot)
3115{
3116	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3117	int ret = dquot_release(dquot);
3118
3119	if (ret < 0)
3120		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3121	return ret;
3122}
3123
3124static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
3125{
3126	struct super_block *sb = dquot->dq_sb;
3127	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3128	int ret = dquot_mark_dquot_dirty(dquot);
3129
3130	/* if we are using journalled quota */
3131	if (is_journalled_quota(sbi))
3132		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
3133
3134	return ret;
3135}
3136
3137static int f2fs_dquot_commit_info(struct super_block *sb, int type)
3138{
3139	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3140	int ret = dquot_commit_info(sb, type);
3141
3142	if (ret < 0)
3143		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3144	return ret;
3145}
3146
3147static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
3148{
3149	*projid = F2FS_I(inode)->i_projid;
3150	return 0;
3151}
3152
3153static const struct dquot_operations f2fs_quota_operations = {
3154	.get_reserved_space = f2fs_get_reserved_space,
3155	.write_dquot	= f2fs_dquot_commit,
3156	.acquire_dquot	= f2fs_dquot_acquire,
3157	.release_dquot	= f2fs_dquot_release,
3158	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
3159	.write_info	= f2fs_dquot_commit_info,
3160	.alloc_dquot	= dquot_alloc,
3161	.destroy_dquot	= dquot_destroy,
3162	.get_projid	= f2fs_get_projid,
3163	.get_next_id	= dquot_get_next_id,
3164};
3165
3166static const struct quotactl_ops f2fs_quotactl_ops = {
3167	.quota_on	= f2fs_quota_on,
3168	.quota_off	= f2fs_quota_off,
3169	.quota_sync	= f2fs_quota_sync,
3170	.get_state	= dquot_get_state,
3171	.set_info	= dquot_set_dqinfo,
3172	.get_dqblk	= dquot_get_dqblk,
3173	.set_dqblk	= dquot_set_dqblk,
3174	.get_nextdqblk	= dquot_get_next_dqblk,
3175};
3176#else
3177int f2fs_dquot_initialize(struct inode *inode)
3178{
3179	return 0;
3180}
3181
3182int f2fs_quota_sync(struct super_block *sb, int type)
3183{
3184	return 0;
3185}
3186
3187void f2fs_quota_off_umount(struct super_block *sb)
3188{
3189}
3190#endif
3191
3192static const struct super_operations f2fs_sops = {
3193	.alloc_inode	= f2fs_alloc_inode,
3194	.free_inode	= f2fs_free_inode,
3195	.drop_inode	= f2fs_drop_inode,
3196	.write_inode	= f2fs_write_inode,
3197	.dirty_inode	= f2fs_dirty_inode,
3198	.show_options	= f2fs_show_options,
3199#ifdef CONFIG_QUOTA
3200	.quota_read	= f2fs_quota_read,
3201	.quota_write	= f2fs_quota_write,
3202	.get_dquots	= f2fs_get_dquots,
3203#endif
3204	.evict_inode	= f2fs_evict_inode,
3205	.put_super	= f2fs_put_super,
3206	.sync_fs	= f2fs_sync_fs,
3207	.freeze_fs	= f2fs_freeze,
3208	.unfreeze_fs	= f2fs_unfreeze,
3209	.statfs		= f2fs_statfs,
3210	.remount_fs	= f2fs_remount,
3211};
3212
3213#ifdef CONFIG_FS_ENCRYPTION
3214static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
3215{
3216	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3217				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3218				ctx, len, NULL);
3219}
3220
3221static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
3222							void *fs_data)
3223{
3224	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3225
3226	/*
3227	 * Encrypting the root directory is not allowed because fsck
3228	 * expects the lost+found directory to exist and remain unencrypted
3229	 * if the LOST_FOUND feature is enabled.
3231	 */
3232	if (f2fs_sb_has_lost_found(sbi) &&
3233			inode->i_ino == F2FS_ROOT_INO(sbi))
3234		return -EPERM;
3235
3236	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3237				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3238				ctx, len, fs_data, XATTR_CREATE);
3239}
3240
3241static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
3242{
3243	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
3244}
3245
3246static bool f2fs_has_stable_inodes(struct super_block *sb)
3247{
3248	return true;
3249}
3250
3251static struct block_device **f2fs_get_devices(struct super_block *sb,
3252					      unsigned int *num_devs)
3253{
3254	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3255	struct block_device **devs;
3256	int i;
3257
3258	if (!f2fs_is_multi_device(sbi))
3259		return NULL;
3260
3261	devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
3262	if (!devs)
3263		return ERR_PTR(-ENOMEM);
3264
3265	for (i = 0; i < sbi->s_ndevs; i++)
3266		devs[i] = FDEV(i).bdev;
3267	*num_devs = sbi->s_ndevs;
3268	return devs;
3269}
3270
3271static const struct fscrypt_operations f2fs_cryptops = {
3272	.needs_bounce_pages	= 1,
3273	.has_32bit_inodes	= 1,
3274	.supports_subblock_data_units = 1,
3275	.legacy_key_prefix	= "f2fs:",
3276	.get_context		= f2fs_get_context,
3277	.set_context		= f2fs_set_context,
3278	.get_dummy_policy	= f2fs_get_dummy_policy,
3279	.empty_dir		= f2fs_empty_dir,
3280	.has_stable_inodes	= f2fs_has_stable_inodes,
3281	.get_devices		= f2fs_get_devices,
3282};
3283#endif
3284
3285static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
3286		u64 ino, u32 generation)
3287{
3288	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3289	struct inode *inode;
3290
3291	if (f2fs_check_nid_range(sbi, ino))
3292		return ERR_PTR(-ESTALE);
3293
3294	/*
3295	 * f2fs_iget isn't quite right if the inode is currently unallocated!
3296	 * However f2fs_iget currently does appropriate checks to handle stale
3297	 * inodes so everything is OK.
3298	 */
3299	inode = f2fs_iget(sb, ino);
3300	if (IS_ERR(inode))
3301		return ERR_CAST(inode);
3302	if (unlikely(generation && inode->i_generation != generation)) {
3303		/* we didn't find the right inode */
3304		iput(inode);
3305		return ERR_PTR(-ESTALE);
3306	}
3307	return inode;
3308}
3309
3310static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
3311		int fh_len, int fh_type)
3312{
3313	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
3314				    f2fs_nfs_get_inode);
3315}
3316
3317static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
3318		int fh_len, int fh_type)
3319{
3320	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
3321				    f2fs_nfs_get_inode);
3322}
3323
3324static const struct export_operations f2fs_export_ops = {
3325	.encode_fh = generic_encode_ino32_fh,
3326	.fh_to_dentry = f2fs_fh_to_dentry,
3327	.fh_to_parent = f2fs_fh_to_parent,
3328	.get_parent = f2fs_get_parent,
3329};
3330
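/*
 * Return the maximum number of blocks addressable by one inode through its
 * direct, indirect and double-indirect node blocks, capped so that the file
 * stays within the fscrypt IV_INO_LBLK_{64,32} limit of U32_MAX + 1 data
 * units.
 */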
3331loff_t max_file_blocks(struct inode *inode)
3332{
3333	loff_t result = 0;
3334	loff_t leaf_count;
3335
3336	/*
3337	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
3338	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
3339	 * space in inode.i_addr, so it is safer to initialize
3340	 * result to zero.
3341	 */
3342
3343	if (inode && f2fs_compressed_file(inode))
3344		leaf_count = ADDRS_PER_BLOCK(inode);
3345	else
3346		leaf_count = DEF_ADDRS_PER_BLOCK;
3347
3348	/* two direct node blocks */
3349	result += (leaf_count * 2);
3350
3351	/* two indirect node blocks */
3352	leaf_count *= NIDS_PER_BLOCK;
3353	result += (leaf_count * 2);
3354
3355	/* one double indirect node block */
3356	leaf_count *= NIDS_PER_BLOCK;
3357	result += leaf_count;
3358
3359	/*
3360	 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
3361	 * a 4K crypto data unit, we must restrict the max filesize to what can
3362	 * fit within U32_MAX + 1 data units.
3363	 */
3364
3365	result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
3366
3367	return result;
3368}
3369
3370static int __f2fs_commit_super(struct buffer_head *bh,
3371			struct f2fs_super_block *super)
3372{
3373	lock_buffer(bh);
3374	if (super)
3375		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
3376	set_buffer_dirty(bh);
3377	unlock_buffer(bh);
3378
3379	/* it's a rare case, so we can do FUA all the time */
3380	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
3381}
3382
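/*
 * Verify that the CP, SIT, NAT, SSA and MAIN areas described in the raw
 * superblock are contiguous and in order.  If MAIN ends before the last
 * segment, the segment count is fixed up in memory and, on a writable
 * device, written back to disk.  Returns true if the layout is invalid.
 */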
3383static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
3384					struct buffer_head *bh)
3385{
3386	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3387					(bh->b_data + F2FS_SUPER_OFFSET);
3388	struct super_block *sb = sbi->sb;
3389	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3390	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
3391	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
3392	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
3393	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3394	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3395	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
3396	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
3397	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
3398	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
3399	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3400	u32 segment_count = le32_to_cpu(raw_super->segment_count);
3401	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3402	u64 main_end_blkaddr = main_blkaddr +
3403				(segment_count_main << log_blocks_per_seg);
3404	u64 seg_end_blkaddr = segment0_blkaddr +
3405				(segment_count << log_blocks_per_seg);
3406
3407	if (segment0_blkaddr != cp_blkaddr) {
3408		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3409			  segment0_blkaddr, cp_blkaddr);
3410		return true;
3411	}
3412
3413	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3414							sit_blkaddr) {
3415		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3416			  cp_blkaddr, sit_blkaddr,
3417			  segment_count_ckpt << log_blocks_per_seg);
3418		return true;
3419	}
3420
3421	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3422							nat_blkaddr) {
3423		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3424			  sit_blkaddr, nat_blkaddr,
3425			  segment_count_sit << log_blocks_per_seg);
3426		return true;
3427	}
3428
3429	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3430							ssa_blkaddr) {
3431		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3432			  nat_blkaddr, ssa_blkaddr,
3433			  segment_count_nat << log_blocks_per_seg);
3434		return true;
3435	}
3436
3437	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3438							main_blkaddr) {
3439		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3440			  ssa_blkaddr, main_blkaddr,
3441			  segment_count_ssa << log_blocks_per_seg);
3442		return true;
3443	}
3444
3445	if (main_end_blkaddr > seg_end_blkaddr) {
3446		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3447			  main_blkaddr, seg_end_blkaddr,
3448			  segment_count_main << log_blocks_per_seg);
3449		return true;
3450	} else if (main_end_blkaddr < seg_end_blkaddr) {
3451		int err = 0;
3452		char *res;
3453
3454		/* fix in-memory information all the time */
3455		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3456				segment0_blkaddr) >> log_blocks_per_seg);
3457
3458		if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
3459			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3460			res = "internally";
3461		} else {
3462			err = __f2fs_commit_super(bh, NULL);
3463			res = err ? "failed" : "done";
3464		}
3465		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3466			  res, main_blkaddr, seg_end_blkaddr,
3467			  segment_count_main << log_blocks_per_seg);
3468		if (err)
3469			return true;
3470	}
3471	return false;
3472}
3473
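/*
 * Validate the raw superblock read from disk: magic, checksum, block and
 * sector sizes, segment/section/zone geometry, per-device segment counts,
 * cp_payload, reserved inode numbers and area boundaries.  Returns 0 if
 * the superblock looks sane, -EINVAL or -EFSCORRUPTED otherwise.
 */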
3474static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3475				struct buffer_head *bh)
3476{
3477	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3478	block_t total_sections, blocks_per_seg;
3479	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3480					(bh->b_data + F2FS_SUPER_OFFSET);
3481	size_t crc_offset = 0;
3482	__u32 crc = 0;
3483
3484	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3485		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3486			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3487		return -EINVAL;
3488	}
3489
3490	/* Check checksum_offset and crc in superblock */
3491	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3492		crc_offset = le32_to_cpu(raw_super->checksum_offset);
3493		if (crc_offset !=
3494			offsetof(struct f2fs_super_block, crc)) {
3495			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3496				  crc_offset);
3497			return -EFSCORRUPTED;
3498		}
3499		crc = le32_to_cpu(raw_super->crc);
3500		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
3501			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3502			return -EFSCORRUPTED;
3503		}
3504	}
3505
3506	/* Currently, support only 4KB block size */
3507	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3508		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3509			  le32_to_cpu(raw_super->log_blocksize),
3510			  F2FS_BLKSIZE_BITS);
3511		return -EFSCORRUPTED;
3512	}
3513
3514	/* check log blocks per segment */
3515	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
3516		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
3517			  le32_to_cpu(raw_super->log_blocks_per_seg));
3518		return -EFSCORRUPTED;
3519	}
3520
3521	/* Currently, support 512/1024/2048/4096/16K bytes sector size */
3522	if (le32_to_cpu(raw_super->log_sectorsize) >
3523				F2FS_MAX_LOG_SECTOR_SIZE ||
3524		le32_to_cpu(raw_super->log_sectorsize) <
3525				F2FS_MIN_LOG_SECTOR_SIZE) {
3526		f2fs_info(sbi, "Invalid log sectorsize (%u)",
3527			  le32_to_cpu(raw_super->log_sectorsize));
3528		return -EFSCORRUPTED;
3529	}
3530	if (le32_to_cpu(raw_super->log_sectors_per_block) +
3531		le32_to_cpu(raw_super->log_sectorsize) !=
3532			F2FS_MAX_LOG_SECTOR_SIZE) {
3533		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
3534			  le32_to_cpu(raw_super->log_sectors_per_block),
3535			  le32_to_cpu(raw_super->log_sectorsize));
3536		return -EFSCORRUPTED;
3537	}
3538
3539	segment_count = le32_to_cpu(raw_super->segment_count);
3540	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3541	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3542	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3543	total_sections = le32_to_cpu(raw_super->section_count);
3544
3545	/* blocks_per_seg should be 512, given the above check */
3546	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));
3547
3548	if (segment_count > F2FS_MAX_SEGMENT ||
3549				segment_count < F2FS_MIN_SEGMENTS) {
3550		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
3551		return -EFSCORRUPTED;
3552	}
3553
3554	if (total_sections > segment_count_main || total_sections < 1 ||
3555			segs_per_sec > segment_count || !segs_per_sec) {
3556		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
3557			  segment_count, total_sections, segs_per_sec);
3558		return -EFSCORRUPTED;
3559	}
3560
3561	if (segment_count_main != total_sections * segs_per_sec) {
3562		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
3563			  segment_count_main, total_sections, segs_per_sec);
3564		return -EFSCORRUPTED;
3565	}
3566
3567	if ((segment_count / segs_per_sec) < total_sections) {
3568		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
3569			  segment_count, segs_per_sec, total_sections);
3570		return -EFSCORRUPTED;
3571	}
3572
3573	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3574		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
3575			  segment_count, le64_to_cpu(raw_super->block_count));
3576		return -EFSCORRUPTED;
3577	}
3578
3579	if (RDEV(0).path[0]) {
3580		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
3581		int i = 1;
3582
3583		while (i < MAX_DEVICES && RDEV(i).path[0]) {
3584			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
3585			i++;
3586		}
3587		if (segment_count != dev_seg_count) {
3588			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
3589					segment_count, dev_seg_count);
3590			return -EFSCORRUPTED;
3591		}
3592	} else {
3593		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
3594					!bdev_is_zoned(sbi->sb->s_bdev)) {
3595			f2fs_info(sbi, "Zoned block device path is missing");
3596			return -EFSCORRUPTED;
3597		}
3598	}
3599
3600	if (secs_per_zone > total_sections || !secs_per_zone) {
3601		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
3602			  secs_per_zone, total_sections);
3603		return -EFSCORRUPTED;
3604	}
3605	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
3606			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
3607			(le32_to_cpu(raw_super->extension_count) +
3608			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
3609		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
3610			  le32_to_cpu(raw_super->extension_count),
3611			  raw_super->hot_ext_count,
3612			  F2FS_MAX_EXTENSION);
3613		return -EFSCORRUPTED;
3614	}
3615
3616	if (le32_to_cpu(raw_super->cp_payload) >=
3617				(blocks_per_seg - F2FS_CP_PACKS -
3618				NR_CURSEG_PERSIST_TYPE)) {
3619		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
3620			  le32_to_cpu(raw_super->cp_payload),
3621			  blocks_per_seg - F2FS_CP_PACKS -
3622			  NR_CURSEG_PERSIST_TYPE);
3623		return -EFSCORRUPTED;
3624	}
3625
3626	/* check reserved ino info */
3627	if (le32_to_cpu(raw_super->node_ino) != 1 ||
3628		le32_to_cpu(raw_super->meta_ino) != 2 ||
3629		le32_to_cpu(raw_super->root_ino) != 3) {
3630		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3631			  le32_to_cpu(raw_super->node_ino),
3632			  le32_to_cpu(raw_super->meta_ino),
3633			  le32_to_cpu(raw_super->root_ino));
3634		return -EFSCORRUPTED;
3635	}
3636
3637	/* check CP/SIT/NAT/SSA/MAIN_AREA boundaries */
3638	if (sanity_check_area_boundary(sbi, bh))
3639		return -EFSCORRUPTED;
3640
3641	return 0;
3642}
3643
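/*
 * Cross-check the checkpoint against the superblock: metadata segment
 * counts, user/node block counts, current segment numbers and block
 * offsets, SIT/NAT bitmap sizes and cp_pack layout.  Returns 1 if anything
 * is inconsistent, 0 otherwise.
 */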
3644int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3645{
3646	unsigned int total, fsmeta;
3647	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3648	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3649	unsigned int ovp_segments, reserved_segments;
3650	unsigned int main_segs, blocks_per_seg;
3651	unsigned int sit_segs, nat_segs;
3652	unsigned int sit_bitmap_size, nat_bitmap_size;
3653	unsigned int log_blocks_per_seg;
3654	unsigned int segment_count_main;
3655	unsigned int cp_pack_start_sum, cp_payload;
3656	block_t user_block_count, valid_user_blocks;
3657	block_t avail_node_count, valid_node_count;
3658	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
3659	int i, j;
3660
3661	total = le32_to_cpu(raw_super->segment_count);
3662	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3663	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3664	fsmeta += sit_segs;
3665	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3666	fsmeta += nat_segs;
3667	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3668	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3669
3670	if (unlikely(fsmeta >= total))
3671		return 1;
3672
3673	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3674	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3675
3676	if (!f2fs_sb_has_readonly(sbi) &&
3677			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3678			ovp_segments == 0 || reserved_segments == 0)) {
3679		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3680		return 1;
3681	}
3682	user_block_count = le64_to_cpu(ckpt->user_block_count);
3683	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3684			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
3685	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3686	if (!user_block_count || user_block_count >=
3687			segment_count_main << log_blocks_per_seg) {
3688		f2fs_err(sbi, "Wrong user_block_count: %u",
3689			 user_block_count);
3690		return 1;
3691	}
3692
3693	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3694	if (valid_user_blocks > user_block_count) {
3695		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3696			 valid_user_blocks, user_block_count);
3697		return 1;
3698	}
3699
3700	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3701	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3702	if (valid_node_count > avail_node_count) {
3703		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3704			 valid_node_count, avail_node_count);
3705		return 1;
3706	}
3707
3708	main_segs = le32_to_cpu(raw_super->segment_count_main);
3709	blocks_per_seg = sbi->blocks_per_seg;
3710
3711	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3712		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3713			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3714			return 1;
3715
3716		if (f2fs_sb_has_readonly(sbi))
3717			goto check_data;
3718
3719		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3720			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3721				le32_to_cpu(ckpt->cur_node_segno[j])) {
3722				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3723					 i, j,
3724					 le32_to_cpu(ckpt->cur_node_segno[i]));
3725				return 1;
3726			}
3727		}
3728	}
3729check_data:
3730	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3731		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3732			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3733			return 1;
3734
3735		if (f2fs_sb_has_readonly(sbi))
3736			goto skip_cross;
3737
3738		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3739			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3740				le32_to_cpu(ckpt->cur_data_segno[j])) {
3741				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3742					 i, j,
3743					 le32_to_cpu(ckpt->cur_data_segno[i]));
3744				return 1;
3745			}
3746		}
3747	}
3748	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3749		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3750			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3751				le32_to_cpu(ckpt->cur_data_segno[j])) {
3752				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
3753					 i, j,
3754					 le32_to_cpu(ckpt->cur_node_segno[i]));
3755				return 1;
3756			}
3757		}
3758	}
3759skip_cross:
3760	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3761	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3762
3763	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3764		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3765		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
3766			 sit_bitmap_size, nat_bitmap_size);
3767		return 1;
3768	}
3769
3770	cp_pack_start_sum = __start_sum_addr(sbi);
3771	cp_payload = __cp_payload(sbi);
3772	if (cp_pack_start_sum < cp_payload + 1 ||
3773		cp_pack_start_sum > blocks_per_seg - 1 -
3774			NR_CURSEG_PERSIST_TYPE) {
3775		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3776			 cp_pack_start_sum);
3777		return 1;
3778	}
3779
3780	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3781		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3782		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3783			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3784			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3785			  le32_to_cpu(ckpt->checksum_offset));
3786		return 1;
3787	}
3788
3789	nat_blocks = nat_segs << log_blocks_per_seg;
3790	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3791	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3792	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3793		(cp_payload + F2FS_CP_PACKS +
3794		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3795		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
3796			  cp_payload, nat_bits_blocks);
3797		return 1;
3798	}
3799
3800	if (unlikely(f2fs_cp_error(sbi))) {
3801		f2fs_err(sbi, "A bug case: need to run fsck");
3802		return 1;
3803	}
3804	return 0;
3805}
3806
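/*
 * Fill the in-memory sb_info with geometry taken from the on-disk
 * superblock (block/segment/section sizes, reserved inode numbers) and
 * initialize default tunables, timers, counters and locks.
 */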
3807static void init_sb_info(struct f2fs_sb_info *sbi)
3808{
3809	struct f2fs_super_block *raw_super = sbi->raw_super;
3810	int i;
3811
3812	sbi->log_sectors_per_block =
3813		le32_to_cpu(raw_super->log_sectors_per_block);
3814	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3815	sbi->blocksize = BIT(sbi->log_blocksize);
3816	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3817	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
3818	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3819	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3820	sbi->total_sections = le32_to_cpu(raw_super->section_count);
3821	sbi->total_node_count =
3822		(le32_to_cpu(raw_super->segment_count_nat) / 2)
3823			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3824	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
3825	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
3826	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
3827	sbi->cur_victim_sec = NULL_SECNO;
3828	sbi->gc_mode = GC_NORMAL;
3829	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3830	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3831	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3832	sbi->migration_granularity = sbi->segs_per_sec;
3833	sbi->seq_file_ra_mul = MIN_RA_MUL;
3834	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
3835	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
3836	spin_lock_init(&sbi->gc_remaining_trials_lock);
3837	atomic64_set(&sbi->current_atomic_write, 0);
3838
3839	sbi->dir_level = DEF_DIR_LEVEL;
3840	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3841	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3842	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3843	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3844	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3845	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3846				DEF_UMOUNT_DISCARD_TIMEOUT;
3847	clear_sbi_flag(sbi, SBI_NEED_FSCK);
3848
3849	for (i = 0; i < NR_COUNT_TYPE; i++)
3850		atomic_set(&sbi->nr_pages[i], 0);
3851
3852	for (i = 0; i < META; i++)
3853		atomic_set(&sbi->wb_sync_req[i], 0);
3854
3855	INIT_LIST_HEAD(&sbi->s_list);
3856	mutex_init(&sbi->umount_mutex);
3857	init_f2fs_rwsem(&sbi->io_order_lock);
3858	spin_lock_init(&sbi->cp_lock);
3859
3860	sbi->dirty_device = 0;
3861	spin_lock_init(&sbi->dev_lock);
3862
3863	init_f2fs_rwsem(&sbi->sb_lock);
3864	init_f2fs_rwsem(&sbi->pin_sem);
3865}
3866
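/* Allocate the per-CPU counters used for block, node and inode accounting. */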
3867static int init_percpu_info(struct f2fs_sb_info *sbi)
3868{
3869	int err;
3870
3871	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3872	if (err)
3873		return err;
3874
3875	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
3876	if (err)
3877		goto err_valid_block;
3878
3879	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3880								GFP_KERNEL);
3881	if (err)
3882		goto err_node_block;
3883	return 0;
3884
3885err_node_block:
3886	percpu_counter_destroy(&sbi->rf_node_block_count);
3887err_valid_block:
3888	percpu_counter_destroy(&sbi->alloc_valid_block_count);
3889	return err;
3890}
3891
3892#ifdef CONFIG_BLK_DEV_ZONED
3893
3894struct f2fs_report_zones_args {
3895	struct f2fs_sb_info *sbi;
3896	struct f2fs_dev_info *dev;
3897};
3898
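/*
 * Per-zone report callback: mark sequential zones in the device's zone
 * bitmap and verify that every zone exposes the same usable capacity.
 */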
3899static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3900			      void *data)
3901{
3902	struct f2fs_report_zones_args *rz_args = data;
3903	block_t unusable_blocks = (zone->len - zone->capacity) >>
3904					F2FS_LOG_SECTORS_PER_BLOCK;
3905
3906	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3907		return 0;
3908
3909	set_bit(idx, rz_args->dev->blkz_seq);
3910	if (!rz_args->sbi->unusable_blocks_per_sec) {
3911		rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
3912		return 0;
3913	}
3914	if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
3915		f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
3916		return -EINVAL;
3917	}
3918	return 0;
3919}
3920
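/*
 * For a zoned device: validate the zone size, record blocks per zone,
 * allocate the sequential-zone bitmap and walk all zones via
 * blkdev_report_zones().
 */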
3921static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3922{
3923	struct block_device *bdev = FDEV(devi).bdev;
3924	sector_t nr_sectors = bdev_nr_sectors(bdev);
3925	struct f2fs_report_zones_args rep_zone_arg;
3926	u64 zone_sectors;
3927	int ret;
3928
3929	if (!f2fs_sb_has_blkzoned(sbi))
3930		return 0;
3931
3932	zone_sectors = bdev_zone_sectors(bdev);
3933	if (!is_power_of_2(zone_sectors)) {
3934		f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
3935		return -EINVAL;
3936	}
3937
3938	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3939				SECTOR_TO_BLOCK(zone_sectors))
3940		return -EINVAL;
3941	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
3942	FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
3943					sbi->blocks_per_blkz);
3944	if (nr_sectors & (zone_sectors - 1))
3945		FDEV(devi).nr_blkz++;
3946
3947	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3948					BITS_TO_LONGS(FDEV(devi).nr_blkz)
3949					* sizeof(unsigned long),
3950					GFP_KERNEL);
3951	if (!FDEV(devi).blkz_seq)
3952		return -ENOMEM;
3953
3954	rep_zone_arg.sbi = sbi;
3955	rep_zone_arg.dev = &FDEV(devi);
3956
3957	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3958				  &rep_zone_arg);
3959	if (ret < 0)
3960		return ret;
3961	return 0;
3962}
3963#endif
3964
3965/*
3966 * Read the raw f2fs superblock.
3967 * There are two copies of the superblock, so read both of them
3968 * and keep the first valid one. If either copy is broken, the
3969 * recovery flag is passed back to the caller.
3970 */
3971static int read_raw_super_block(struct f2fs_sb_info *sbi,
3972			struct f2fs_super_block **raw_super,
3973			int *valid_super_block, int *recovery)
3974{
3975	struct super_block *sb = sbi->sb;
3976	int block;
3977	struct buffer_head *bh;
3978	struct f2fs_super_block *super;
3979	int err = 0;
3980
3981	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3982	if (!super)
3983		return -ENOMEM;
3984
3985	for (block = 0; block < 2; block++) {
3986		bh = sb_bread(sb, block);
3987		if (!bh) {
3988			f2fs_err(sbi, "Unable to read %dth superblock",
3989				 block + 1);
3990			err = -EIO;
3991			*recovery = 1;
3992			continue;
3993		}
3994
3995		/* sanity checking of raw super */
3996		err = sanity_check_raw_super(sbi, bh);
3997		if (err) {
3998			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
3999				 block + 1);
4000			brelse(bh);
4001			*recovery = 1;
4002			continue;
4003		}
4004
4005		if (!*raw_super) {
4006			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
4007							sizeof(*super));
4008			*valid_super_block = block;
4009			*raw_super = super;
4010		}
4011		brelse(bh);
4012	}
4013
4014	/* No valid superblock */
4015	if (!*raw_super)
4016		kfree(super);
4017	else
4018		err = 0;
4019
4020	return err;
4021}
4022
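/*
 * Write out both superblock copies: the backup copy first, then the
 * currently valid one.  When called for recovery, only the backup copy
 * is rewritten.
 */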
4023int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
4024{
4025	struct buffer_head *bh;
4026	__u32 crc = 0;
4027	int err;
4028
4029	if ((recover && f2fs_readonly(sbi->sb)) ||
4030				f2fs_hw_is_readonly(sbi)) {
4031		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
4032		return -EROFS;
4033	}
4034
4035	/* we should update superblock crc here */
4036	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
4037		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
4038				offsetof(struct f2fs_super_block, crc));
4039		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
4040	}
4041
4042	/* write back-up superblock first */
4043	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
4044	if (!bh)
4045		return -EIO;
4046	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
4047	brelse(bh);
4048
4049	/* if we are in recovery path, skip writing valid superblock */
4050	if (recover || err)
4051		return err;
4052
4053	/* write current valid superblock */
4054	bh = sb_bread(sbi->sb, sbi->valid_super_block);
4055	if (!bh)
4056		return -EIO;
4057	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
4058	brelse(bh);
4059	return err;
4060}
4061
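/* Bump the per-reason stop counter, saturating at the 8-bit maximum. */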
4062static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
4063{
4064	unsigned long flags;
4065
4066	spin_lock_irqsave(&sbi->error_lock, flags);
4067	if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
4068		sbi->stop_reason[reason]++;
4069	spin_unlock_irqrestore(&sbi->error_lock, flags);
4070}
4071
4072static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
4073{
4074	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4075	unsigned long flags;
4076	int err;
4077
4078	f2fs_down_write(&sbi->sb_lock);
4079
4080	spin_lock_irqsave(&sbi->error_lock, flags);
4081	if (sbi->error_dirty) {
4082		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
4083							MAX_F2FS_ERRORS);
4084		sbi->error_dirty = false;
4085	}
4086	memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
4087	spin_unlock_irqrestore(&sbi->error_lock, flags);
4088
4089	err = f2fs_commit_super(sbi, false);
4090
4091	f2fs_up_write(&sbi->sb_lock);
4092	if (err)
4093		f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err);
4094}
4095
4096void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
4097{
4098	unsigned long flags;
4099
4100	spin_lock_irqsave(&sbi->error_lock, flags);
4101	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
4102		set_bit(flag, (unsigned long *)sbi->errors);
4103		sbi->error_dirty = true;
4104	}
4105	spin_unlock_irqrestore(&sbi->error_lock, flags);
4106}
4107
4108static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
4109{
4110	unsigned long flags;
4111	bool need_update = false;
4112
4113	spin_lock_irqsave(&sbi->error_lock, flags);
4114	if (sbi->error_dirty) {
4115		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
4116							MAX_F2FS_ERRORS);
4117		sbi->error_dirty = false;
4118		need_update = true;
4119	}
4120	spin_unlock_irqrestore(&sbi->error_lock, flags);
4121
4122	return need_update;
4123}
4124
4125static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
4126{
4127	int err;
4128
4129	f2fs_down_write(&sbi->sb_lock);
4130
4131	if (!f2fs_update_errors(sbi))
4132		goto out_unlock;
4133
4134	err = f2fs_commit_super(sbi, false);
4135	if (err)
4136		f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d",
4137								error, err);
4138out_unlock:
4139	f2fs_up_write(&sbi->sb_lock);
4140}
4141
4142void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
4143{
4144	f2fs_save_errors(sbi, error);
4145	f2fs_record_errors(sbi, error);
4146}
4147
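/*
 * Record an error and let a worker persist it to the superblock, for
 * callers that must not issue the write synchronously.
 */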
4148void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
4149{
4150	f2fs_save_errors(sbi, error);
4151
4152	if (!sbi->error_dirty)
4153		return;
4154	if (!test_bit(error, (unsigned long *)sbi->errors))
4155		return;
4156	schedule_work(&sbi->s_error_work);
4157}
4158
4159static bool system_going_down(void)
4160{
4161	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
4162		|| system_state == SYSTEM_RESTART;
4163}
4164
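/*
 * Handle a fatal error: persist the stop reason, panic if mounted with
 * errors=panic (unless the system is already going down), and otherwise
 * force the filesystem read-only unless errors=continue was requested.
 */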
4165void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
4166							bool irq_context)
4167{
4168	struct super_block *sb = sbi->sb;
4169	bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
4170	bool continue_fs = !shutdown &&
4171			F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;
4172
4173	set_ckpt_flags(sbi, CP_ERROR_FLAG);
4174
4175	if (!f2fs_hw_is_readonly(sbi)) {
4176		save_stop_reason(sbi, reason);
4177
4178		if (irq_context && !shutdown)
4179			schedule_work(&sbi->s_error_work);
4180		else
4181			f2fs_record_stop_reason(sbi);
4182	}
4183
4184	/*
4185	 * We force ERRORS_RO behavior when the system is rebooting. Otherwise
4186	 * we could panic during 'reboot -f' because the underlying device may
4187	 * already have been disabled.
4188	 */
4189	if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
4190				!shutdown && !system_going_down() &&
4191				!is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
4192		panic("F2FS-fs (device %s): panic forced after error\n",
4193							sb->s_id);
4194
4195	if (shutdown)
4196		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
4197
4198	/* continue filesystem operation if errors=continue */
4199	if (continue_fs || f2fs_readonly(sb))
4200		return;
4201
4202	f2fs_warn(sbi, "Remounting filesystem read-only");
4203	/*
4204	 * Make sure the updated value of ->s_mount_flags is visible before
4205	 * ->s_flags is updated.
4206	 */
4207	smp_wmb();
4208	sb->s_flags |= SB_RDONLY;
4209}
4210
4211static void f2fs_record_error_work(struct work_struct *work)
4212{
4213	struct f2fs_sb_info *sbi = container_of(work,
4214					struct f2fs_sb_info, s_error_work);
4215
4216	f2fs_record_stop_reason(sbi);
4217}
4218
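/*
 * Build the device list: open any extra block devices named in the
 * superblock, compute each device's block range, and collect zoned
 * device information where applicable.
 */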
4219static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
4220{
4221	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4222	unsigned int max_devices = MAX_DEVICES;
4223	unsigned int logical_blksize;
4224	blk_mode_t mode = sb_open_mode(sbi->sb->s_flags);
4225	int i;
4226
4227	/* Initialize single device information */
4228	if (!RDEV(0).path[0]) {
4229		if (!bdev_is_zoned(sbi->sb->s_bdev))
4230			return 0;
4231		max_devices = 1;
4232	}
4233
4234	/*
4235	 * Initialize information for multiple devices, or for a
4236	 * single zoned block device.
4237	 */
4238	sbi->devs = f2fs_kzalloc(sbi,
4239				 array_size(max_devices,
4240					    sizeof(struct f2fs_dev_info)),
4241				 GFP_KERNEL);
4242	if (!sbi->devs)
4243		return -ENOMEM;
4244
4245	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
4246	sbi->aligned_blksize = true;
4247
4248	for (i = 0; i < max_devices; i++) {
4249		if (i == 0)
4250			FDEV(0).bdev_handle = sbi->sb->s_bdev_handle;
4251		else if (!RDEV(i).path[0])
4252			break;
4253
4254		if (max_devices > 1) {
4255			/* Multi-device mount */
4256			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
4257			FDEV(i).total_segments =
4258				le32_to_cpu(RDEV(i).total_segments);
4259			if (i == 0) {
4260				FDEV(i).start_blk = 0;
4261				FDEV(i).end_blk = FDEV(i).start_blk +
4262				    (FDEV(i).total_segments <<
4263				    sbi->log_blocks_per_seg) - 1 +
4264				    le32_to_cpu(raw_super->segment0_blkaddr);
4265			} else {
4266				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
4267				FDEV(i).end_blk = FDEV(i).start_blk +
4268					(FDEV(i).total_segments <<
4269					sbi->log_blocks_per_seg) - 1;
4270				FDEV(i).bdev_handle = bdev_open_by_path(
4271					FDEV(i).path, mode, sbi->sb, NULL);
4272			}
4273		}
4274		if (IS_ERR(FDEV(i).bdev_handle))
4275			return PTR_ERR(FDEV(i).bdev_handle);
4276
4277		FDEV(i).bdev = FDEV(i).bdev_handle->bdev;
4278		/* to release errored devices */
4279		sbi->s_ndevs = i + 1;
4280
4281		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
4282			sbi->aligned_blksize = false;
4283
4284#ifdef CONFIG_BLK_DEV_ZONED
4285		if (bdev_is_zoned(FDEV(i).bdev)) {
4286			if (!f2fs_sb_has_blkzoned(sbi)) {
4287				f2fs_err(sbi, "Zoned block device feature not enabled");
4288				return -EINVAL;
4289			}
4290			if (init_blkz_info(sbi, i)) {
4291				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
4292				return -EINVAL;
4293			}
4294			if (max_devices == 1)
4295				break;
4296			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)",
4297				  i, FDEV(i).path,
4298				  FDEV(i).total_segments,
4299				  FDEV(i).start_blk, FDEV(i).end_blk);
4300			continue;
4301		}
4302#endif
4303		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
4304			  i, FDEV(i).path,
4305			  FDEV(i).total_segments,
4306			  FDEV(i).start_blk, FDEV(i).end_blk);
4307	}
4308	f2fs_info(sbi,
4309		  "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
4310	return 0;
4311}
4312
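/*
 * When the casefold feature is enabled, load the Unicode encoding named
 * in the superblock and attach it to the VFS super_block.
 */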
4313static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
4314{
4315#if IS_ENABLED(CONFIG_UNICODE)
4316	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
4317		const struct f2fs_sb_encodings *encoding_info;
4318		struct unicode_map *encoding;
4319		__u16 encoding_flags;
4320
4321		encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
4322		if (!encoding_info) {
4323			f2fs_err(sbi,
4324				 "Encoding requested by superblock is unknown");
4325			return -EINVAL;
4326		}
4327
4328		encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
4329		encoding = utf8_load(encoding_info->version);
4330		if (IS_ERR(encoding)) {
4331			f2fs_err(sbi,
4332				 "can't mount with superblock charset: %s-%u.%u.%u "
4333				 "not supported by the kernel. flags: 0x%x.",
4334				 encoding_info->name,
4335				 unicode_major(encoding_info->version),
4336				 unicode_minor(encoding_info->version),
4337				 unicode_rev(encoding_info->version),
4338				 encoding_flags);
4339			return PTR_ERR(encoding);
4340		}
4341		f2fs_info(sbi, "Using encoding defined by superblock: "
4342			 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
4343			 unicode_major(encoding_info->version),
4344			 unicode_minor(encoding_info->version),
4345			 unicode_rev(encoding_info->version),
4346			 encoding_flags);
4347
4348		sbi->sb->s_encoding = encoding;
4349		sbi->sb->s_encoding_flags = encoding_flags;
4350	}
4351#else
4352	if (f2fs_sb_has_casefold(sbi)) {
4353		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
4354		return -EINVAL;
4355	}
4356#endif
4357	return 0;
4358}
4359
4360static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
4361{
4362	/* adjust parameters according to the volume size */
4363	if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
4364		if (f2fs_block_unit_discard(sbi))
4365			SM_I(sbi)->dcc_info->discard_granularity =
4366						MIN_DISCARD_GRANULARITY;
4367		if (!f2fs_lfs_mode(sbi))
4368			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
4369						BIT(F2FS_IPU_HONOR_OPU_WRITE);
4370	}
4371
4372	sbi->readdir_ra = true;
4373}
4374
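/*
 * Main mount path: read and validate the superblock and checkpoint,
 * build the segment and node managers, recover fsynced data when
 * required, and read the root inode.  The error labels below unwind in
 * reverse order of initialization.
 */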
4375static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
4376{
4377	struct f2fs_sb_info *sbi;
4378	struct f2fs_super_block *raw_super;
4379	struct inode *root;
4380	int err;
4381	bool skip_recovery = false, need_fsck = false;
4382	char *options = NULL;
4383	int recovery, i, valid_super_block;
4384	struct curseg_info *seg_i;
4385	int retry_cnt = 1;
4386#ifdef CONFIG_QUOTA
4387	bool quota_enabled = false;
4388#endif
4389
4390try_onemore:
4391	err = -EINVAL;
4392	raw_super = NULL;
4393	valid_super_block = -1;
4394	recovery = 0;
4395
4396	/* allocate memory for f2fs-specific super block info */
4397	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
4398	if (!sbi)
4399		return -ENOMEM;
4400
4401	sbi->sb = sb;
4402
4403	/* initialize locks within allocated memory */
4404	init_f2fs_rwsem(&sbi->gc_lock);
4405	mutex_init(&sbi->writepages);
4406	init_f2fs_rwsem(&sbi->cp_global_sem);
4407	init_f2fs_rwsem(&sbi->node_write);
4408	init_f2fs_rwsem(&sbi->node_change);
4409	spin_lock_init(&sbi->stat_lock);
4410	init_f2fs_rwsem(&sbi->cp_rwsem);
4411	init_f2fs_rwsem(&sbi->quota_sem);
4412	init_waitqueue_head(&sbi->cp_wait);
4413	spin_lock_init(&sbi->error_lock);
4414
4415	for (i = 0; i < NR_INODE_TYPE; i++) {
4416		INIT_LIST_HEAD(&sbi->inode_list[i]);
4417		spin_lock_init(&sbi->inode_lock[i]);
4418	}
4419	mutex_init(&sbi->flush_lock);
4420
4421	/* Load the checksum driver */
4422	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
4423	if (IS_ERR(sbi->s_chksum_driver)) {
4424		f2fs_err(sbi, "Cannot load crc32 driver.");
4425		err = PTR_ERR(sbi->s_chksum_driver);
4426		sbi->s_chksum_driver = NULL;
4427		goto free_sbi;
4428	}
4429
4430	/* set a block size */
4431	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
4432		f2fs_err(sbi, "unable to set blocksize");
4433		goto free_sbi;
4434	}
4435
4436	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
4437								&recovery);
4438	if (err)
4439		goto free_sbi;
4440
4441	sb->s_fs_info = sbi;
4442	sbi->raw_super = raw_super;
4443
4444	INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
4445	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
4446	memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);
4447
4448	/* precompute checksum seed for metadata */
4449	if (f2fs_sb_has_inode_chksum(sbi))
4450		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
4451						sizeof(raw_super->uuid));
4452
4453	default_options(sbi, false);
4454	/* parse mount options */
4455	options = kstrdup((const char *)data, GFP_KERNEL);
4456	if (data && !options) {
4457		err = -ENOMEM;
4458		goto free_sb_buf;
4459	}
4460
4461	err = parse_options(sb, options, false);
4462	if (err)
4463		goto free_options;
4464
4465	sb->s_maxbytes = max_file_blocks(NULL) <<
4466				le32_to_cpu(raw_super->log_blocksize);
4467	sb->s_max_links = F2FS_LINK_MAX;
4468
4469	err = f2fs_setup_casefold(sbi);
4470	if (err)
4471		goto free_options;
4472
4473#ifdef CONFIG_QUOTA
4474	sb->dq_op = &f2fs_quota_operations;
4475	sb->s_qcop = &f2fs_quotactl_ops;
4476	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4477
4478	if (f2fs_sb_has_quota_ino(sbi)) {
4479		for (i = 0; i < MAXQUOTAS; i++) {
4480			if (f2fs_qf_ino(sbi->sb, i))
4481				sbi->nquota_files++;
4482		}
4483	}
4484#endif
4485
4486	sb->s_op = &f2fs_sops;
4487#ifdef CONFIG_FS_ENCRYPTION
4488	sb->s_cop = &f2fs_cryptops;
4489#endif
4490#ifdef CONFIG_FS_VERITY
4491	sb->s_vop = &f2fs_verityops;
4492#endif
4493	sb->s_xattr = f2fs_xattr_handlers;
4494	sb->s_export_op = &f2fs_export_ops;
4495	sb->s_magic = F2FS_SUPER_MAGIC;
4496	sb->s_time_gran = 1;
4497	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4498		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
4499	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
4500	sb->s_iflags |= SB_I_CGROUPWB;
4501
4502	/* init f2fs-specific super block info */
4503	sbi->valid_super_block = valid_super_block;
4504
4505	/* disallow all the data/node/meta page writes */
4506	set_sbi_flag(sbi, SBI_POR_DOING);
4507
4508	err = f2fs_init_write_merge_io(sbi);
4509	if (err)
4510		goto free_bio_info;
4511
4512	init_sb_info(sbi);
4513
4514	err = f2fs_init_iostat(sbi);
4515	if (err)
4516		goto free_bio_info;
4517
4518	err = init_percpu_info(sbi);
4519	if (err)
4520		goto free_iostat;
4521
4522	if (F2FS_IO_ALIGNED(sbi)) {
4523		sbi->write_io_dummy =
4524			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
4525		if (!sbi->write_io_dummy) {
4526			err = -ENOMEM;
4527			goto free_percpu;
4528		}
4529	}
4530
4531	/* init per sbi slab cache */
4532	err = f2fs_init_xattr_caches(sbi);
4533	if (err)
4534		goto free_io_dummy;
4535	err = f2fs_init_page_array_cache(sbi);
4536	if (err)
4537		goto free_xattr_cache;
4538
4539	/* get an inode for meta space */
4540	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
4541	if (IS_ERR(sbi->meta_inode)) {
4542		f2fs_err(sbi, "Failed to read F2FS meta data inode");
4543		err = PTR_ERR(sbi->meta_inode);
4544		goto free_page_array_cache;
4545	}
4546
4547	err = f2fs_get_valid_checkpoint(sbi);
4548	if (err) {
4549		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
4550		goto free_meta_inode;
4551	}
4552
4553	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
4554		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
4555	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
4556		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4557		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
4558	}
4559
4560	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
4561		set_sbi_flag(sbi, SBI_NEED_FSCK);
4562
4563	/* Initialize device list */
4564	err = f2fs_scan_devices(sbi);
4565	if (err) {
4566		f2fs_err(sbi, "Failed to find devices");
4567		goto free_devices;
4568	}
4569
4570	err = f2fs_init_post_read_wq(sbi);
4571	if (err) {
4572		f2fs_err(sbi, "Failed to initialize post read workqueue");
4573		goto free_devices;
4574	}
4575
4576	sbi->total_valid_node_count =
4577				le32_to_cpu(sbi->ckpt->valid_node_count);
4578	percpu_counter_set(&sbi->total_valid_inode_count,
4579				le32_to_cpu(sbi->ckpt->valid_inode_count));
4580	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
4581	sbi->total_valid_block_count =
4582				le64_to_cpu(sbi->ckpt->valid_block_count);
4583	sbi->last_valid_block_count = sbi->total_valid_block_count;
4584	sbi->reserved_blocks = 0;
4585	sbi->current_reserved_blocks = 0;
4586	limit_reserve_root(sbi);
4587	adjust_unusable_cap_perc(sbi);
4588
4589	f2fs_init_extent_cache_info(sbi);
4590
4591	f2fs_init_ino_entry_info(sbi);
4592
4593	f2fs_init_fsync_node_info(sbi);
4594
4595	/* setup checkpoint request control and start checkpoint issue thread */
4596	f2fs_init_ckpt_req_control(sbi);
4597	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4598			test_opt(sbi, MERGE_CHECKPOINT)) {
4599		err = f2fs_start_ckpt_thread(sbi);
4600		if (err) {
4601			f2fs_err(sbi,
4602			    "Failed to start F2FS issue_checkpoint_thread (%d)",
4603			    err);
4604			goto stop_ckpt_thread;
4605		}
4606	}
4607
4608	/* setup f2fs internal modules */
4609	err = f2fs_build_segment_manager(sbi);
4610	if (err) {
4611		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4612			 err);
4613		goto free_sm;
4614	}
4615	err = f2fs_build_node_manager(sbi);
4616	if (err) {
4617		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4618			 err);
4619		goto free_nm;
4620	}
4621
4622	err = adjust_reserved_segment(sbi);
4623	if (err)
4624		goto free_nm;
4625
4626	/* For write statistics */
4627	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4628
4629	/* Read accumulated write IO statistics if they exist */
4630	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4631	if (__exist_node_summaries(sbi))
4632		sbi->kbytes_written =
4633			le64_to_cpu(seg_i->journal->info.kbytes_written);
4634
4635	f2fs_build_gc_manager(sbi);
4636
4637	err = f2fs_build_stats(sbi);
4638	if (err)
4639		goto free_nm;
4640
4641	/* get an inode for node space */
4642	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4643	if (IS_ERR(sbi->node_inode)) {
4644		f2fs_err(sbi, "Failed to read node inode");
4645		err = PTR_ERR(sbi->node_inode);
4646		goto free_stats;
4647	}
4648
4649	/* read root inode and dentry */
4650	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
4651	if (IS_ERR(root)) {
4652		f2fs_err(sbi, "Failed to read root inode");
4653		err = PTR_ERR(root);
4654		goto free_node_inode;
4655	}
4656	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
4657			!root->i_size || !root->i_nlink) {
4658		iput(root);
4659		err = -EINVAL;
4660		goto free_node_inode;
4661	}
4662
4663	sb->s_root = d_make_root(root); /* allocate root dentry */
4664	if (!sb->s_root) {
4665		err = -ENOMEM;
4666		goto free_node_inode;
4667	}
4668
4669	err = f2fs_init_compress_inode(sbi);
4670	if (err)
4671		goto free_root_inode;
4672
4673	err = f2fs_register_sysfs(sbi);
4674	if (err)
4675		goto free_compress_inode;
4676
4677#ifdef CONFIG_QUOTA
4678	/* Enable quota usage during mount */
4679	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
4680		err = f2fs_enable_quotas(sb);
4681		if (err)
4682			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
4683	}
4684
4685	quota_enabled = f2fs_recover_quota_begin(sbi);
4686#endif
4687	/* if there are any orphan inodes, free them */
4688	err = f2fs_recover_orphan_inodes(sbi);
4689	if (err)
4690		goto free_meta;
4691
4692	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
4693		goto reset_checkpoint;
4694
4695	/* recover fsynced data */
4696	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
4697			!test_opt(sbi, NORECOVERY)) {
4698		/*
4699		 * The mount should fail when the device is read-only and the
4700		 * previous checkpoint was not written by a clean system shutdown.
4701		 */
4702		if (f2fs_hw_is_readonly(sbi)) {
4703			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4704				err = f2fs_recover_fsync_data(sbi, true);
4705				if (err > 0) {
4706					err = -EROFS;
4707					f2fs_err(sbi, "Need to recover fsync data, but "
4708						"write access unavailable, please try "
4709						"mount w/ disable_roll_forward or norecovery");
4710				}
4711				if (err < 0)
4712					goto free_meta;
4713			}
4714			f2fs_info(sbi, "write access unavailable, skipping recovery");
4715			goto reset_checkpoint;
4716		}
4717
4718		if (need_fsck)
4719			set_sbi_flag(sbi, SBI_NEED_FSCK);
4720
4721		if (skip_recovery)
4722			goto reset_checkpoint;
4723
4724		err = f2fs_recover_fsync_data(sbi, false);
4725		if (err < 0) {
4726			if (err != -ENOMEM)
4727				skip_recovery = true;
4728			need_fsck = true;
4729			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
4730				 err);
4731			goto free_meta;
4732		}
4733	} else {
4734		err = f2fs_recover_fsync_data(sbi, true);
4735
4736		if (!f2fs_readonly(sb) && err > 0) {
4737			err = -EINVAL;
4738			f2fs_err(sbi, "Need to recover fsync data");
4739			goto free_meta;
4740		}
4741	}
4742
4743#ifdef CONFIG_QUOTA
4744	f2fs_recover_quota_end(sbi, quota_enabled);
4745#endif
4746reset_checkpoint:
4747	/*
4748	 * If f2fs is not read-only and fsync data recovery succeeded,
4749	 * check the zoned block devices' write pointer consistency.
4750	 */
4751	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
4752		err = f2fs_check_write_pointer(sbi);
4753		if (err)
4754			goto free_meta;
4755	}
4756
4757	f2fs_init_inmem_curseg(sbi);
4758
4759	/* f2fs_recover_fsync_data() cleared this already */
4760	clear_sbi_flag(sbi, SBI_POR_DOING);
4761
4762	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
4763		err = f2fs_disable_checkpoint(sbi);
4764		if (err)
4765			goto sync_free_meta;
4766	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
4767		f2fs_enable_checkpoint(sbi);
4768	}
4769
4770	/*
4771	 * If the filesystem is not mounted read-only,
4772	 * start the gc_thread.
4773	 */
4774	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
4775		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
4776		/* After POR, we can run the background GC thread. */
4777		err = f2fs_start_gc_thread(sbi);
4778		if (err)
4779			goto sync_free_meta;
4780	}
4781	kvfree(options);
4782
4783	/* recover broken superblock */
4784	if (recovery) {
4785		err = f2fs_commit_super(sbi, true);
4786		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
4787			  sbi->valid_super_block ? 1 : 2, err);
4788	}
4789
4790	f2fs_join_shrinker(sbi);
4791
4792	f2fs_tuning_parameters(sbi);
4793
4794	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
4795		    cur_cp_version(F2FS_CKPT(sbi)));
4796	f2fs_update_time(sbi, CP_TIME);
4797	f2fs_update_time(sbi, REQ_TIME);
4798	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4799	return 0;
4800
4801sync_free_meta:
4802	/* safe to flush all the data */
4803	sync_filesystem(sbi->sb);
4804	retry_cnt = 0;
4805
4806free_meta:
4807#ifdef CONFIG_QUOTA
4808	f2fs_truncate_quota_inode_pages(sb);
4809	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
4810		f2fs_quota_off_umount(sbi->sb);
4811#endif
4812	/*
4813	 * Some dirty meta pages can be left behind when f2fs_recover_orphan_inodes()
4814	 * fails with EIO. iput(node_inode) can then trigger balance_fs_bg() followed
4815	 * by f2fs_write_checkpoint() through f2fs_write_node_pages(), which falls
4816	 * into an infinite loop in f2fs_sync_meta_pages().
4817	 */
4818	truncate_inode_pages_final(META_MAPPING(sbi));
4819	/* evict some inodes being cached by GC */
4820	evict_inodes(sb);
4821	f2fs_unregister_sysfs(sbi);
4822free_compress_inode:
4823	f2fs_destroy_compress_inode(sbi);
4824free_root_inode:
4825	dput(sb->s_root);
4826	sb->s_root = NULL;
4827free_node_inode:
4828	f2fs_release_ino_entry(sbi, true);
4829	truncate_inode_pages_final(NODE_MAPPING(sbi));
4830	iput(sbi->node_inode);
4831	sbi->node_inode = NULL;
4832free_stats:
4833	f2fs_destroy_stats(sbi);
4834free_nm:
4835	/* stop discard thread before destroying node manager */
4836	f2fs_stop_discard_thread(sbi);
4837	f2fs_destroy_node_manager(sbi);
4838free_sm:
4839	f2fs_destroy_segment_manager(sbi);
4840stop_ckpt_thread:
4841	f2fs_stop_ckpt_thread(sbi);
4842	/* flush s_error_work before sbi destroy */
4843	flush_work(&sbi->s_error_work);
4844	f2fs_destroy_post_read_wq(sbi);
4845free_devices:
4846	destroy_device_list(sbi);
4847	kvfree(sbi->ckpt);
4848free_meta_inode:
4849	make_bad_inode(sbi->meta_inode);
4850	iput(sbi->meta_inode);
4851	sbi->meta_inode = NULL;
4852free_page_array_cache:
4853	f2fs_destroy_page_array_cache(sbi);
4854free_xattr_cache:
4855	f2fs_destroy_xattr_caches(sbi);
4856free_io_dummy:
4857	mempool_destroy(sbi->write_io_dummy);
4858free_percpu:
4859	destroy_percpu_info(sbi);
4860free_iostat:
4861	f2fs_destroy_iostat(sbi);
4862free_bio_info:
4863	for (i = 0; i < NR_PAGE_TYPE; i++)
4864		kvfree(sbi->write_io[i]);
4865
4866#if IS_ENABLED(CONFIG_UNICODE)
4867	utf8_unload(sb->s_encoding);
4868	sb->s_encoding = NULL;
4869#endif
4870free_options:
4871#ifdef CONFIG_QUOTA
4872	for (i = 0; i < MAXQUOTAS; i++)
4873		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
4874#endif
4875	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
4876	kvfree(options);
4877free_sb_buf:
4878	kfree(raw_super);
4879free_sbi:
4880	if (sbi->s_chksum_driver)
4881		crypto_free_shash(sbi->s_chksum_driver);
4882	kfree(sbi);
4883	sb->s_fs_info = NULL;
4884
4885	/* give only one more chance */
4886	if (retry_cnt > 0 && skip_recovery) {
4887		retry_cnt--;
4888		shrink_dcache_sb(sb);
4889		goto try_onemore;
4890	}
4891	return err;
4892}
4893
4894static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
4895			const char *dev_name, void *data)
4896{
4897	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
4898}
4899
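/*
 * Unmount path: stop the GC and discard threads, write a final umount
 * checkpoint if needed, then tear down the generic super_block and
 * release the device list.
 */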
4900static void kill_f2fs_super(struct super_block *sb)
4901{
4902	struct f2fs_sb_info *sbi = F2FS_SB(sb);
4903
4904	if (sb->s_root) {
4905		set_sbi_flag(sbi, SBI_IS_CLOSE);
4906		f2fs_stop_gc_thread(sbi);
4907		f2fs_stop_discard_thread(sbi);
4908
4909#ifdef CONFIG_F2FS_FS_COMPRESSION
4910		/*
4911		 * A later evict_inode() can bypass checking and invalidating
4912		 * the compress inode cache.
4913		 */
4914		if (test_opt(sbi, COMPRESS_CACHE))
4915			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
4916#endif
4917
4918		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
4919				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4920			struct cp_control cpc = {
4921				.reason = CP_UMOUNT,
4922			};
4923			stat_inc_cp_call_count(sbi, TOTAL_CALL);
4924			f2fs_write_checkpoint(sbi, &cpc);
4925		}
4926
4927		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
4928			sb->s_flags &= ~SB_RDONLY;
4929	}
4930	kill_block_super(sb);
4931	/* Release block devices last, after fscrypt_destroy_keyring(). */
4932	if (sbi) {
4933		destroy_device_list(sbi);
4934		kfree(sbi);
4935		sb->s_fs_info = NULL;
4936	}
4937}
4938
4939static struct file_system_type f2fs_fs_type = {
4940	.owner		= THIS_MODULE,
4941	.name		= "f2fs",
4942	.mount		= f2fs_mount,
4943	.kill_sb	= kill_f2fs_super,
4944	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
4945};
4946MODULE_ALIAS_FS("f2fs");
4947
4948static int __init init_inodecache(void)
4949{
4950	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4951			sizeof(struct f2fs_inode_info), 0,
4952			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4953	return f2fs_inode_cachep ? 0 : -ENOMEM;
4954}
4955
4956static void destroy_inodecache(void)
4957{
4958	/*
4959	 * Make sure all delayed RCU-freed inodes are flushed before we
4960	 * destroy the cache.
4961	 */
4962	rcu_barrier();
4963	kmem_cache_destroy(f2fs_inode_cachep);
4964}
4965
4966static int __init init_f2fs_fs(void)
4967{
4968	int err;
4969
4970	if (PAGE_SIZE != F2FS_BLKSIZE) {
4971		printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
4972				PAGE_SIZE, F2FS_BLKSIZE);
4973		return -EINVAL;
4974	}
4975
4976	err = init_inodecache();
4977	if (err)
4978		goto fail;
4979	err = f2fs_create_node_manager_caches();
4980	if (err)
4981		goto free_inodecache;
4982	err = f2fs_create_segment_manager_caches();
4983	if (err)
4984		goto free_node_manager_caches;
4985	err = f2fs_create_checkpoint_caches();
4986	if (err)
4987		goto free_segment_manager_caches;
4988	err = f2fs_create_recovery_cache();
4989	if (err)
4990		goto free_checkpoint_caches;
4991	err = f2fs_create_extent_cache();
4992	if (err)
4993		goto free_recovery_cache;
4994	err = f2fs_create_garbage_collection_cache();
4995	if (err)
4996		goto free_extent_cache;
4997	err = f2fs_init_sysfs();
4998	if (err)
4999		goto free_garbage_collection_cache;
5000	err = f2fs_init_shrinker();
5001	if (err)
5002		goto free_sysfs;
5003	err = register_filesystem(&f2fs_fs_type);
5004	if (err)
5005		goto free_shrinker;
5006	f2fs_create_root_stats();
5007	err = f2fs_init_post_read_processing();
5008	if (err)
5009		goto free_root_stats;
5010	err = f2fs_init_iostat_processing();
5011	if (err)
5012		goto free_post_read;
5013	err = f2fs_init_bio_entry_cache();
5014	if (err)
5015		goto free_iostat;
5016	err = f2fs_init_bioset();
5017	if (err)
5018		goto free_bio_entry_cache;
5019	err = f2fs_init_compress_mempool();
5020	if (err)
5021		goto free_bioset;
5022	err = f2fs_init_compress_cache();
5023	if (err)
5024		goto free_compress_mempool;
5025	err = f2fs_create_casefold_cache();
5026	if (err)
5027		goto free_compress_cache;
5028	return 0;
5029free_compress_cache:
5030	f2fs_destroy_compress_cache();
5031free_compress_mempool:
5032	f2fs_destroy_compress_mempool();
5033free_bioset:
5034	f2fs_destroy_bioset();
5035free_bio_entry_cache:
5036	f2fs_destroy_bio_entry_cache();
5037free_iostat:
5038	f2fs_destroy_iostat_processing();
5039free_post_read:
5040	f2fs_destroy_post_read_processing();
5041free_root_stats:
5042	f2fs_destroy_root_stats();
5043	unregister_filesystem(&f2fs_fs_type);
5044free_shrinker:
5045	f2fs_exit_shrinker();
5046free_sysfs:
5047	f2fs_exit_sysfs();
5048free_garbage_collection_cache:
5049	f2fs_destroy_garbage_collection_cache();
5050free_extent_cache:
5051	f2fs_destroy_extent_cache();
5052free_recovery_cache:
5053	f2fs_destroy_recovery_cache();
5054free_checkpoint_caches:
5055	f2fs_destroy_checkpoint_caches();
5056free_segment_manager_caches:
5057	f2fs_destroy_segment_manager_caches();
5058free_node_manager_caches:
5059	f2fs_destroy_node_manager_caches();
5060free_inodecache:
5061	destroy_inodecache();
5062fail:
5063	return err;
5064}
5065
5066static void __exit exit_f2fs_fs(void)
5067{
5068	f2fs_destroy_casefold_cache();
5069	f2fs_destroy_compress_cache();
5070	f2fs_destroy_compress_mempool();
5071	f2fs_destroy_bioset();
5072	f2fs_destroy_bio_entry_cache();
5073	f2fs_destroy_iostat_processing();
5074	f2fs_destroy_post_read_processing();
5075	f2fs_destroy_root_stats();
5076	unregister_filesystem(&f2fs_fs_type);
5077	f2fs_exit_shrinker();
5078	f2fs_exit_sysfs();
5079	f2fs_destroy_garbage_collection_cache();
5080	f2fs_destroy_extent_cache();
5081	f2fs_destroy_recovery_cache();
5082	f2fs_destroy_checkpoint_caches();
5083	f2fs_destroy_segment_manager_caches();
5084	f2fs_destroy_node_manager_caches();
5085	destroy_inodecache();
5086}
5087
5088module_init(init_f2fs_fs)
5089module_exit(exit_f2fs_fs)
5090
5091MODULE_AUTHOR("Samsung Electronics's Praesto Team");
5092MODULE_DESCRIPTION("Flash Friendly File System");
5093MODULE_LICENSE("GPL");
5094MODULE_SOFTDEP("pre: crc32");
5095