   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/gc.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/fs.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/f2fs_fs.h>
  12#include <linux/kthread.h>
  13#include <linux/delay.h>
  14#include <linux/freezer.h>
  15#include <linux/sched/signal.h>
  16#include <linux/random.h>
  17#include <linux/sched/mm.h>
  18
  19#include "f2fs.h"
  20#include "node.h"
  21#include "segment.h"
  22#include "gc.h"
  23#include "iostat.h"
  24#include <trace/events/f2fs.h>
  25
  26static struct kmem_cache *victim_entry_slab;
  27
  28static unsigned int count_bits(const unsigned long *addr,
  29				unsigned int offset, unsigned int len);
  30
  31static int gc_thread_func(void *data)
  32{
  33	struct f2fs_sb_info *sbi = data;
  34	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
  35	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
  36	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
  37	unsigned int wait_ms;
  38	struct f2fs_gc_control gc_control = {
  39		.victim_segno = NULL_SEGNO,
  40		.should_migrate_blocks = false,
  41		.err_gc_skipped = false };
  42
  43	wait_ms = gc_th->min_sleep_time;
  44
  45	set_freezable();
  46	do {
  47		bool sync_mode, foreground = false;
  48
  49		wait_event_freezable_timeout(*wq,
  50				kthread_should_stop() ||
  51				waitqueue_active(fggc_wq) ||
  52				gc_th->gc_wake,
  53				msecs_to_jiffies(wait_ms));
  54
  55		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
  56			foreground = true;
  57
  58		/* give it a try one time */
  59		if (gc_th->gc_wake)
  60			gc_th->gc_wake = false;
  61
  62		if (f2fs_readonly(sbi->sb)) {
  63			stat_other_skip_bggc_count(sbi);
  64			continue;
  65		}
  66		if (kthread_should_stop())
  67			break;
  68
  69		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
  70			increase_sleep_time(gc_th, &wait_ms);
  71			stat_other_skip_bggc_count(sbi);
  72			continue;
  73		}
  74
  75		if (time_to_inject(sbi, FAULT_CHECKPOINT))
  76			f2fs_stop_checkpoint(sbi, false,
  77					STOP_CP_REASON_FAULT_INJECT);
  78
  79		if (!sb_start_write_trylock(sbi->sb)) {
  80			stat_other_skip_bggc_count(sbi);
  81			continue;
  82		}
  83
  83
  84		/*
  85		 * [GC triggering condition]
  86		 * 0. GC is not conducted currently.
  87		 * 1. There are enough dirty segments.
  88		 * 2. IO subsystem is idle by checking the # of writeback pages.
  89		 * 3. IO subsystem is idle by checking the # of requests in
  90		 *    bdev's request list.
  91		 *
  92		 * Note) We have to avoid triggering GCs too frequently,
  93		 * because some segments may be invalidated soon after by
  94		 * user update or deletion, so it is better to wait a while
  95		 * to collect more dirty segments.
  96		 */
  97		if (sbi->gc_mode == GC_URGENT_HIGH ||
  98				sbi->gc_mode == GC_URGENT_MID) {
  99			wait_ms = gc_th->urgent_sleep_time;
 100			f2fs_down_write(&sbi->gc_lock);
 101			goto do_gc;
 102		}
 103
 104		if (foreground) {
 105			f2fs_down_write(&sbi->gc_lock);
 106			goto do_gc;
 107		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 108			stat_other_skip_bggc_count(sbi);
 109			goto next;
 110		}
 111
 112		if (!is_idle(sbi, GC_TIME)) {
 113			increase_sleep_time(gc_th, &wait_ms);
 114			f2fs_up_write(&sbi->gc_lock);
 115			stat_io_skip_bggc_count(sbi);
 116			goto next;
 117		}
 118
 119		if (has_enough_invalid_blocks(sbi))
 120			decrease_sleep_time(gc_th, &wait_ms);
 121		else
 122			increase_sleep_time(gc_th, &wait_ms);
 123do_gc:
 124		stat_inc_gc_call_count(sbi, foreground ?
 125					FOREGROUND : BACKGROUND);
 126
 127		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
 128
 129		/* foreground GC was triggered via f2fs_balance_fs() */
 130		if (foreground)
 131			sync_mode = false;
 132
 133		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
 134		gc_control.no_bg_gc = foreground;
 135		gc_control.nr_free_secs = foreground ? 1 : 0;
 136
 137		/* if return value is not zero, no victim was selected */
 138		if (f2fs_gc(sbi, &gc_control)) {
 139			/* don't adjust wait_ms for foreground GC */
 140			if (!foreground)
 141				wait_ms = gc_th->no_gc_sleep_time;
 142		} else {
 143			/* reset wait_ms to default sleep time */
 144			if (wait_ms == gc_th->no_gc_sleep_time)
 145				wait_ms = gc_th->min_sleep_time;
 146		}
 147
 148		if (foreground)
 149			wake_up_all(&gc_th->fggc_wq);
 150
 151		trace_f2fs_background_gc(sbi->sb, wait_ms,
 152				prefree_segments(sbi), free_segments(sbi));
 153
 154		/* balancing f2fs's metadata periodically */
 155		f2fs_balance_fs_bg(sbi, true);
 156next:
 157		if (sbi->gc_mode != GC_NORMAL) {
 158			spin_lock(&sbi->gc_remaining_trials_lock);
 159			if (sbi->gc_remaining_trials) {
 160				sbi->gc_remaining_trials--;
 161				if (!sbi->gc_remaining_trials)
 162					sbi->gc_mode = GC_NORMAL;
 163			}
 164			spin_unlock(&sbi->gc_remaining_trials_lock);
 165		}
 166		sb_end_write(sbi->sb);
 167
 168	} while (!kthread_should_stop());
 169	return 0;
 170}
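/*
 * Editorial sketch of the pacing policy above (not part of the original
 * source; timings assume the defaults defined in gc.h: min 30s, max 60s,
 * no-GC 5min, urgent 500ms):
 *
 *	wait_ms starts at min_sleep_time (30s);
 *	idle, few invalid blocks   -> increase_sleep_time(), toward 60s;
 *	idle, many invalid blocks  -> decrease_sleep_time(), toward 30s;
 *	f2fs_gc() found no victim  -> wait_ms = no_gc_sleep_time (5min);
 *	GC_URGENT_HIGH/MID         -> wait_ms = urgent_sleep_time (500ms).
 *
 * So the thread backs off while background GC is unproductive and only
 * tightens the loop when invalid blocks accumulate or user space asks
 * for urgency via the gc_urgent sysfs knob.
 */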
 171
 172int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
 173{
 174	struct f2fs_gc_kthread *gc_th;
 175	dev_t dev = sbi->sb->s_bdev->bd_dev;
 176
 177	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
 178	if (!gc_th)
 179		return -ENOMEM;
 180
 181	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
 182	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
 183	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
 184	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
 185
 186	gc_th->gc_wake = false;
 187
 188	sbi->gc_thread = gc_th;
 189	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
 190	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
 191	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
 192			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 193	if (IS_ERR(gc_th->f2fs_gc_task)) {
 194		int err = PTR_ERR(gc_th->f2fs_gc_task);
 195
 196		kfree(gc_th);
 197		sbi->gc_thread = NULL;
 198		return err;
 199	}
 200
 201	return 0;
 202}
 203
 204void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 205{
 206	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 207
 208	if (!gc_th)
 209		return;
 210	kthread_stop(gc_th->f2fs_gc_task);
 211	wake_up_all(&gc_th->fggc_wq);
 212	kfree(gc_th);
 213	sbi->gc_thread = NULL;
 214}
 215
 216static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
 217{
 218	int gc_mode;
 219
 220	if (gc_type == BG_GC) {
 221		if (sbi->am.atgc_enabled)
 222			gc_mode = GC_AT;
 223		else
 224			gc_mode = GC_CB;
 225	} else {
 226		gc_mode = GC_GREEDY;
 227	}
 228
 229	switch (sbi->gc_mode) {
 230	case GC_IDLE_CB:
 231		gc_mode = GC_CB;
 232		break;
 233	case GC_IDLE_GREEDY:
 234	case GC_URGENT_HIGH:
 235		gc_mode = GC_GREEDY;
 236		break;
 237	case GC_IDLE_AT:
 238		gc_mode = GC_AT;
 239		break;
 240	}
 241
 242	return gc_mode;
 243}
 244
 245static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 246			int type, struct victim_sel_policy *p)
 247{
 248	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 249
 250	if (p->alloc_mode == SSR) {
 251		p->gc_mode = GC_GREEDY;
 252		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 253		p->max_search = dirty_i->nr_dirty[type];
 254		p->ofs_unit = 1;
 255	} else if (p->alloc_mode == AT_SSR) {
 256		p->gc_mode = GC_GREEDY;
 257		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 258		p->max_search = dirty_i->nr_dirty[type];
 259		p->ofs_unit = 1;
 260	} else {
 261		p->gc_mode = select_gc_type(sbi, gc_type);
 262		p->ofs_unit = SEGS_PER_SEC(sbi);
 263		if (__is_large_section(sbi)) {
 264			p->dirty_bitmap = dirty_i->dirty_secmap;
 265			p->max_search = count_bits(p->dirty_bitmap,
 266						0, MAIN_SECS(sbi));
 267		} else {
 268			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
 269			p->max_search = dirty_i->nr_dirty[DIRTY];
 270		}
 271	}
 272
 273	/*
 274	 * adjust the candidate range; we should search all dirty segments
 275	 * for the foreground GC and urgent GC cases.
 276	 */
 277	if (gc_type != FG_GC &&
 278			(sbi->gc_mode != GC_URGENT_HIGH) &&
 279			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
 280			p->max_search > sbi->max_victim_search)
 281		p->max_search = sbi->max_victim_search;
 282
 283	/* let's select beginning hot/small space first. */
 284	if (f2fs_need_rand_seg(sbi))
 285		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
 286						SEGS_PER_SEC(sbi));
 287	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
 288		p->offset = 0;
 289	else
 290		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
 291}
 292
 293static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
 294				struct victim_sel_policy *p)
 295{
 296	/* SSR allocates in a segment unit */
 297	if (p->alloc_mode == SSR)
 298		return BLKS_PER_SEG(sbi);
 299	else if (p->alloc_mode == AT_SSR)
 300		return UINT_MAX;
 301
 302	/* LFS */
 303	if (p->gc_mode == GC_GREEDY)
 304		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
 305	else if (p->gc_mode == GC_CB)
 306		return UINT_MAX;
 307	else if (p->gc_mode == GC_AT)
 308		return UINT_MAX;
 309	else /* No other gc_mode */
 310		return 0;
 311}
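/*
 * Editorial note (an inference from the code above, not original text):
 * the value returned here seeds p->min_cost before the scan loop in
 * f2fs_get_victim(). For GC_GREEDY under LFS the bound is two sections'
 * worth of blocks, which exceeds any section's possible valid-block
 * count, so every scanned dirty section can beat the initial minimum;
 * GC_CB and GC_AT use UINT_MAX because their costs are computed as
 * UINT_MAX minus a benefit score.
 */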
 312
 313static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
 314{
 315	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 316	unsigned int secno;
 317
 318	/*
 319	 * If the gc_type is FG_GC, we can reuse the victim sections that
 320	 * were selected by background GC before.
 321	 * Those sections are guaranteed to have few valid blocks.
 322	 */
 323	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
 324		if (sec_usage_check(sbi, secno))
 325			continue;
 326		clear_bit(secno, dirty_i->victim_secmap);
 327		return GET_SEG_FROM_SEC(sbi, secno);
 328	}
 329	return NULL_SEGNO;
 330}
 331
 332static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 333{
 334	struct sit_info *sit_i = SIT_I(sbi);
 335	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 336	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
 337	unsigned long long mtime = 0;
 338	unsigned int vblocks;
 339	unsigned char age = 0;
 340	unsigned char u;
 341	unsigned int i;
 342	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
 343
 344	for (i = 0; i < usable_segs_per_sec; i++)
 345		mtime += get_seg_entry(sbi, start + i)->mtime;
 346	vblocks = get_valid_blocks(sbi, segno, true);
 347
 348	mtime = div_u64(mtime, usable_segs_per_sec);
 349	vblocks = div_u64(vblocks, usable_segs_per_sec);
 350
 351	u = BLKS_TO_SEGS(sbi, vblocks * 100);
 352
 353	/* Handle the case where the user has changed the system time */
 354	if (mtime < sit_i->min_mtime)
 355		sit_i->min_mtime = mtime;
 356	if (mtime > sit_i->max_mtime)
 357		sit_i->max_mtime = mtime;
 358	if (sit_i->max_mtime != sit_i->min_mtime)
 359		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
 360				sit_i->max_mtime - sit_i->min_mtime);
 361
 362	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 363}
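/*
 * Worked example of the cost-benefit metric above (editorial, with
 * made-up numbers): for a section with u = 20 (20% of its blocks still
 * valid) and age = 50 (mid-range mtime), the benefit term is
 *
 *	100 * (100 - u) * age / (100 + u) = 100 * 80 * 50 / 120 = 3333,
 *
 * so the returned cost is UINT_MAX - 3333. Older and emptier sections
 * earn a larger benefit, hence a smaller cost; this mirrors the classic
 * (1 - u) * age / (1 + u) cost-benefit cleaning policy from LFS.
 */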
 364
 365static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
 366			unsigned int segno, struct victim_sel_policy *p)
 367{
 368	if (p->alloc_mode == SSR)
 369		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 370
 371	/* alloc_mode == LFS */
 372	if (p->gc_mode == GC_GREEDY)
 373		return get_valid_blocks(sbi, segno, true);
 374	else if (p->gc_mode == GC_CB)
 375		return get_cb_cost(sbi, segno);
 376
 377	f2fs_bug_on(sbi, 1);
 378	return 0;
 379}
 380
 381static unsigned int count_bits(const unsigned long *addr,
 382				unsigned int offset, unsigned int len)
 383{
 384	unsigned int end = offset + len, sum = 0;
 385
 386	while (offset < end) {
 387		if (test_bit(offset++, addr))
 388			++sum;
 389	}
 390	return sum;
 391}
 392
 393static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
 394				struct rb_root_cached *root)
 395{
 396#ifdef CONFIG_F2FS_CHECK_FS
 397	struct rb_node *cur = rb_first_cached(root), *next;
 398	struct victim_entry *cur_ve, *next_ve;
 399
 400	while (cur) {
 401		next = rb_next(cur);
 402		if (!next)
 403			return true;
 404
 405		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
 406		next_ve = rb_entry(next, struct victim_entry, rb_node);
 407
 408		if (cur_ve->mtime > next_ve->mtime) {
 409			f2fs_info(sbi, "broken victim_rbtree, "
 410				"cur_mtime(%llu) next_mtime(%llu)",
 411				cur_ve->mtime, next_ve->mtime);
 412			return false;
 413		}
 414		cur = next;
 415	}
 416#endif
 417	return true;
 418}
 419
 420static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
 421					unsigned long long mtime)
 422{
 423	struct atgc_management *am = &sbi->am;
 424	struct rb_node *node = am->root.rb_root.rb_node;
 425	struct victim_entry *ve = NULL;
 426
 427	while (node) {
 428		ve = rb_entry(node, struct victim_entry, rb_node);
 429
 430		if (mtime < ve->mtime)
 431			node = node->rb_left;
 432		else
 433			node = node->rb_right;
 434	}
 435	return ve;
 436}
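/*
 * Editorial note: this lookup intentionally does not stop on an exact
 * mtime match; it walks down to a leaf and returns the last entry
 * visited, i.e. an approximate neighbor of @mtime (or NULL only when
 * the tree is empty). atssr_lookup_victim() below relies on that to get
 * a starting point for its bidirectional scan around the target age.
 */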
 437
 438static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
 439		unsigned long long mtime, unsigned int segno)
 440{
 441	struct atgc_management *am = &sbi->am;
 442	struct victim_entry *ve;
 443
 444	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
 445
 446	ve->mtime = mtime;
 447	ve->segno = segno;
 448
 449	list_add_tail(&ve->list, &am->victim_list);
 450	am->victim_count++;
 451
 452	return ve;
 453}
 454
 455static void __insert_victim_entry(struct f2fs_sb_info *sbi,
 456				unsigned long long mtime, unsigned int segno)
 457{
 458	struct atgc_management *am = &sbi->am;
 459	struct rb_root_cached *root = &am->root;
 460	struct rb_node **p = &root->rb_root.rb_node;
 461	struct rb_node *parent = NULL;
 462	struct victim_entry *ve;
 463	bool left_most = true;
 464
 465	/* look up rb tree to find parent node */
 466	while (*p) {
 467		parent = *p;
 468		ve = rb_entry(parent, struct victim_entry, rb_node);
 469
 470		if (mtime < ve->mtime) {
 471			p = &(*p)->rb_left;
 472		} else {
 473			p = &(*p)->rb_right;
 474			left_most = false;
 475		}
 476	}
 477
 478	ve = __create_victim_entry(sbi, mtime, segno);
 479
 480	rb_link_node(&ve->rb_node, parent, p);
 481	rb_insert_color_cached(&ve->rb_node, root, left_most);
 482}
 483
 484static void add_victim_entry(struct f2fs_sb_info *sbi,
 485				struct victim_sel_policy *p, unsigned int segno)
 486{
 487	struct sit_info *sit_i = SIT_I(sbi);
 488	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 489	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
 490	unsigned long long mtime = 0;
 491	unsigned int i;
 492
 493	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 494		if (p->gc_mode == GC_AT &&
 495			get_valid_blocks(sbi, segno, true) == 0)
 496			return;
 497	}
 498
 499	for (i = 0; i < SEGS_PER_SEC(sbi); i++)
 500		mtime += get_seg_entry(sbi, start + i)->mtime;
 501	mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
 502
 503	/* Handle the case where the user has changed the system time */
 504	if (mtime < sit_i->min_mtime)
 505		sit_i->min_mtime = mtime;
 506	if (mtime > sit_i->max_mtime)
 507		sit_i->max_mtime = mtime;
 508	if (mtime < sit_i->dirty_min_mtime)
 509		sit_i->dirty_min_mtime = mtime;
 510	if (mtime > sit_i->dirty_max_mtime)
 511		sit_i->dirty_max_mtime = mtime;
 512
 513	/* don't choose a young section as a candidate */
 514	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
 515		return;
 516
 517	__insert_victim_entry(sbi, mtime, segno);
 518}
 519
 520static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
 521						struct victim_sel_policy *p)
 522{
 523	struct sit_info *sit_i = SIT_I(sbi);
 524	struct atgc_management *am = &sbi->am;
 525	struct rb_root_cached *root = &am->root;
 526	struct rb_node *node;
 527	struct victim_entry *ve;
 528	unsigned long long total_time;
 529	unsigned long long age, u, accu;
 530	unsigned long long max_mtime = sit_i->dirty_max_mtime;
 531	unsigned long long min_mtime = sit_i->dirty_min_mtime;
 532	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
 533	unsigned int vblocks;
 534	unsigned int dirty_threshold = max(am->max_candidate_count,
 535					am->candidate_ratio *
 536					am->victim_count / 100);
 537	unsigned int age_weight = am->age_weight;
 538	unsigned int cost;
 539	unsigned int iter = 0;
 540
 541	if (max_mtime < min_mtime)
 542		return;
 543
 544	max_mtime += 1;
 545	total_time = max_mtime - min_mtime;
 546
 547	accu = div64_u64(ULLONG_MAX, total_time);
 548	accu = min_t(unsigned long long, div_u64(accu, 100),
 549					DEFAULT_ACCURACY_CLASS);
 550
 551	node = rb_first_cached(root);
 552next:
 553	ve = rb_entry_safe(node, struct victim_entry, rb_node);
 554	if (!ve)
 555		return;
 556
 557	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
 558		goto skip;
 559
 560	/* age = 10000 * x% * 60 */
 561	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
 562								age_weight;
 563
 564	vblocks = get_valid_blocks(sbi, ve->segno, true);
 565	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
 566
 567	/* u = 10000 * x% * 40 */
 568	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
 569							(100 - age_weight);
 570
 571	f2fs_bug_on(sbi, age + u >= UINT_MAX);
 572
 573	cost = UINT_MAX - (age + u);
 574	iter++;
 575
 576	if (cost < p->min_cost ||
 577			(cost == p->min_cost && age > p->oldest_age)) {
 578		p->min_cost = cost;
 579		p->oldest_age = age;
 580		p->min_segno = ve->segno;
 581	}
 582skip:
 583	if (iter < dirty_threshold) {
 584		node = rb_next(node);
 585		goto next;
 586	}
 587}
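/*
 * Worked example of the ATGC score above (editorial; assumes the gc.h
 * defaults, age_weight = 60 and DEFAULT_ACCURACY_CLASS = 10000, so age
 * counts 60% and free space 40%): a section sitting at 50% of the dirty
 * mtime range with 25% of its blocks still valid scores roughly
 *
 *	age  = 10000 * 50% * 60         = 300000
 *	u    = 10000 * 75% * (100 - 60) = 300000
 *	cost = UINT_MAX - 600000
 *
 * Older and emptier candidates therefore get a lower cost, and the tie
 * break above prefers the older of two equal-cost candidates.
 */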
 588
 589/*
 590 * select candidates around source section in range of
 591 * [target - dirty_threshold, target + dirty_threshold]
 592 */
 593static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
 594						struct victim_sel_policy *p)
 595{
 596	struct sit_info *sit_i = SIT_I(sbi);
 597	struct atgc_management *am = &sbi->am;
 598	struct victim_entry *ve;
 599	unsigned long long age;
 600	unsigned long long max_mtime = sit_i->dirty_max_mtime;
 601	unsigned long long min_mtime = sit_i->dirty_min_mtime;
 602	unsigned int vblocks;
 603	unsigned int dirty_threshold = max(am->max_candidate_count,
 604					am->candidate_ratio *
 605					am->victim_count / 100);
 606	unsigned int cost, iter;
 607	int stage = 0;
 608
 609	if (max_mtime < min_mtime)
 610		return;
 611	max_mtime += 1;
 612next_stage:
 613	iter = 0;
 614	ve = __lookup_victim_entry(sbi, p->age);
 615next_node:
 616	if (!ve) {
 617		if (stage++ == 0)
 618			goto next_stage;
 619		return;
 620	}
 621
 622	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
 623		goto skip_node;
 624
 625	age = max_mtime - ve->mtime;
 626
 627	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
 628	f2fs_bug_on(sbi, !vblocks);
 629
 630	/* rare case */
 631	if (vblocks == BLKS_PER_SEG(sbi))
 632		goto skip_node;
 633
 634	iter++;
 635
 636	age = max_mtime - abs(p->age - age);
 637	cost = UINT_MAX - vblocks;
 638
 639	if (cost < p->min_cost ||
 640			(cost == p->min_cost && age > p->oldest_age)) {
 641		p->min_cost = cost;
 642		p->oldest_age = age;
 643		p->min_segno = ve->segno;
 644	}
 645skip_node:
 646	if (iter < dirty_threshold) {
 647		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
 648					rb_next(&ve->rb_node),
 649					struct victim_entry, rb_node);
 650		goto next_node;
 651	}
 652
 653	if (stage++ == 0)
 654		goto next_stage;
 655}
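/*
 * Editorial note on the two stages above: stage 0 starts from the entry
 * returned by __lookup_victim_entry(p->age) and walks rb_prev() (smaller
 * mtimes), then stage 1 restarts from the same point and walks rb_next()
 * (larger mtimes), each visiting at most dirty_threshold candidates.
 * The rb_entry() on a possibly-NULL node works here only because rb_node
 * appears to be the first member of struct victim_entry, making the
 * container_of() arithmetic a no-op that the "if (!ve)" check can catch.
 */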
 656
 657static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
 658						struct victim_sel_policy *p)
 659{
 660	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
 661
 662	if (p->gc_mode == GC_AT)
 663		atgc_lookup_victim(sbi, p);
 664	else if (p->alloc_mode == AT_SSR)
 665		atssr_lookup_victim(sbi, p);
 666	else
 667		f2fs_bug_on(sbi, 1);
 668}
 669
 670static void release_victim_entry(struct f2fs_sb_info *sbi)
 671{
 672	struct atgc_management *am = &sbi->am;
 673	struct victim_entry *ve, *tmp;
 674
 675	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
 676		list_del(&ve->list);
 677		kmem_cache_free(victim_entry_slab, ve);
 678		am->victim_count--;
 679	}
 680
 681	am->root = RB_ROOT_CACHED;
 682
 683	f2fs_bug_on(sbi, am->victim_count);
 684	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
 685}
 686
 687static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
 688{
 689	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 690	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 691
 692	if (!dirty_i->enable_pin_section)
 693		return false;
 694	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
 695		dirty_i->pinned_secmap_cnt++;
 696	return true;
 697}
 698
 699static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
 700{
 701	return dirty_i->pinned_secmap_cnt;
 702}
 703
 704static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
 705						unsigned int secno)
 706{
 707	return dirty_i->enable_pin_section &&
 708		f2fs_pinned_section_exists(dirty_i) &&
 709		test_bit(secno, dirty_i->pinned_secmap);
 710}
 711
 712static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
 713{
 714	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
 715
 716	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
 717		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
 718		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
 719	}
 720	DIRTY_I(sbi)->enable_pin_section = enable;
 721}
 722
 723static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
 724							unsigned int segno)
 725{
 726	if (!f2fs_is_pinned_file(inode))
 727		return 0;
 728	if (gc_type != FG_GC)
 729		return -EBUSY;
 730	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
 731		f2fs_pin_file_control(inode, true);
 732	return -EAGAIN;
 733}
 734
 735/*
 736 * This function is called from two paths.
 737 * One is garbage collection and the other is SSR segment selection.
 738 * When it is called during GC, it just gets a victim segment
 739 * and it does not remove it from dirty seglist.
 740 * When it is called from SSR segment selection, it finds a segment
 741 * which has minimum valid blocks and removes it from dirty seglist.
 742 */
 743int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
 744			int gc_type, int type, char alloc_mode,
 745			unsigned long long age)
 746{
 747	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 748	struct sit_info *sm = SIT_I(sbi);
 749	struct victim_sel_policy p;
 750	unsigned int secno, last_victim;
 751	unsigned int last_segment;
 752	unsigned int nsearched;
 753	bool is_atgc;
 754	int ret = 0;
 755
 756	mutex_lock(&dirty_i->seglist_lock);
 757	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
 758
 759	p.alloc_mode = alloc_mode;
 760	p.age = age;
 761	p.age_threshold = sbi->am.age_threshold;
 762
 763retry:
 764	select_policy(sbi, gc_type, type, &p);
 765	p.min_segno = NULL_SEGNO;
 766	p.oldest_age = 0;
 767	p.min_cost = get_max_cost(sbi, &p);
 768
 769	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
 770	nsearched = 0;
 771
 772	if (is_atgc)
 773		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
 774
 775	if (*result != NULL_SEGNO) {
 776		if (!get_valid_blocks(sbi, *result, false)) {
 777			ret = -ENODATA;
 778			goto out;
 779		}
 780
 781		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 782			ret = -EBUSY;
 783		else
 784			p.min_segno = *result;
 785		goto out;
 786	}
 787
 788	ret = -ENODATA;
 789	if (p.max_search == 0)
 790		goto out;
 791
 792	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
 793		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
 794			p.min_segno = sbi->next_victim_seg[BG_GC];
 795			*result = p.min_segno;
 796			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
 797			goto got_result;
 798		}
 799		if (gc_type == FG_GC &&
 800				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
 801			p.min_segno = sbi->next_victim_seg[FG_GC];
 802			*result = p.min_segno;
 803			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 804			goto got_result;
 805		}
 806	}
 807
 808	last_victim = sm->last_victim[p.gc_mode];
 809	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 810		p.min_segno = check_bg_victims(sbi);
 811		if (p.min_segno != NULL_SEGNO)
 812			goto got_it;
 813	}
 814
 815	while (1) {
 816		unsigned long cost, *dirty_bitmap;
 817		unsigned int unit_no, segno;
 818
 819		dirty_bitmap = p.dirty_bitmap;
 820		unit_no = find_next_bit(dirty_bitmap,
 821				last_segment / p.ofs_unit,
 822				p.offset / p.ofs_unit);
 823		segno = unit_no * p.ofs_unit;
 824		if (segno >= last_segment) {
 825			if (sm->last_victim[p.gc_mode]) {
 826				last_segment =
 827					sm->last_victim[p.gc_mode];
 828				sm->last_victim[p.gc_mode] = 0;
 829				p.offset = 0;
 830				continue;
 831			}
 832			break;
 833		}
 834
 835		p.offset = segno + p.ofs_unit;
 836		nsearched++;
 837
 838#ifdef CONFIG_F2FS_CHECK_FS
 839		/*
 840		 * skip selecting an invalid segno (i.e. one that failed the
 841		 * block validity check during GC) to avoid an endless GC loop
 842		 * in such cases.
 843		 */
 844		if (test_bit(segno, sm->invalid_segmap))
 845			goto next;
 846#endif
 847
 848		secno = GET_SEC_FROM_SEG(sbi, segno);
 849
 850		if (sec_usage_check(sbi, secno))
 851			goto next;
 852
 853		/* Don't touch checkpointed data */
 854		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 855			if (p.alloc_mode == LFS) {
 856				/*
 857				 * LFS is set to find source section during GC.
 858				 * The victim should have no checkpointed data.
 859				 */
 860				if (get_ckpt_valid_blocks(sbi, segno, true))
 861					goto next;
 862			} else {
 863				/*
 864				 * SSR | AT_SSR are set to find a target segment
 865				 * for writes, which can be filled with checkpointed
 866				 * and newly written blocks.
 867				 */
 868				if (!f2fs_segment_has_free_slot(sbi, segno))
 869					goto next;
 870			}
 871		}
 872
 873		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
 874			goto next;
 875
 876		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
 877			goto next;
 878
 879		if (is_atgc) {
 880			add_victim_entry(sbi, &p, segno);
 881			goto next;
 882		}
 883
 884		cost = get_gc_cost(sbi, segno, &p);
 885
 886		if (p.min_cost > cost) {
 887			p.min_segno = segno;
 888			p.min_cost = cost;
 889		}
 890next:
 891		if (nsearched >= p.max_search) {
 892			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
 893				sm->last_victim[p.gc_mode] =
 894					last_victim + p.ofs_unit;
 895			else
 896				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
 897			sm->last_victim[p.gc_mode] %=
 898				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
 899			break;
 900		}
 901	}
 902
 903	/* get victim for GC_AT/AT_SSR */
 904	if (is_atgc) {
 905		lookup_victim_by_age(sbi, &p);
 906		release_victim_entry(sbi);
 907	}
 908
 909	if (is_atgc && p.min_segno == NULL_SEGNO &&
 910			sm->elapsed_time < p.age_threshold) {
 911		p.age_threshold = 0;
 912		goto retry;
 913	}
 914
 915	if (p.min_segno != NULL_SEGNO) {
 916got_it:
 917		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 918got_result:
 919		if (p.alloc_mode == LFS) {
 920			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 921			if (gc_type == FG_GC)
 922				sbi->cur_victim_sec = secno;
 923			else
 924				set_bit(secno, dirty_i->victim_secmap);
 925		}
 926		ret = 0;
 927
 928	}
 929out:
 930	if (p.min_segno != NULL_SEGNO)
 931		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 932				sbi->cur_victim_sec,
 933				prefree_segments(sbi), free_segments(sbi));
 934	mutex_unlock(&dirty_i->seglist_lock);
 935
 936	return ret;
 937}
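/*
 * Editorial usage sketch (hypothetical caller, modeled on __get_victim()
 * below): GC asks for a whole victim section with alloc_mode == LFS,
 * while SSR-style allocation asks for a single reusable segment.
 *
 *	unsigned int segno = NULL_SEGNO;
 *
 *	if (!f2fs_get_victim(sbi, &segno, FG_GC, NO_CHECK_TYPE, LFS, 0))
 *		do_garbage_collect(sbi, segno, ...);
 *
 * On success *result holds the first segment of the chosen unit of
 * p.ofs_unit segments; -ENODATA means no candidate was found, and
 * -EBUSY means the caller-supplied segment is already being used.
 */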
 938
 939static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 940{
 941	struct inode_entry *ie;
 942
 943	ie = radix_tree_lookup(&gc_list->iroot, ino);
 944	if (ie)
 945		return ie->inode;
 946	return NULL;
 947}
 948
 949static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 950{
 951	struct inode_entry *new_ie;
 952
 953	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
 954		iput(inode);
 955		return;
 956	}
 957	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
 958					GFP_NOFS, true, NULL);
 959	new_ie->inode = inode;
 960
 961	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
 962	list_add_tail(&new_ie->list, &gc_list->ilist);
 963}
 964
 965static void put_gc_inode(struct gc_inode_list *gc_list)
 966{
 967	struct inode_entry *ie, *next_ie;
 968
 969	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
 970		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
 971		iput(ie->inode);
 972		list_del(&ie->list);
 973		kmem_cache_free(f2fs_inode_entry_slab, ie);
 974	}
 975}
 976
 977static int check_valid_map(struct f2fs_sb_info *sbi,
 978				unsigned int segno, int offset)
 979{
 980	struct sit_info *sit_i = SIT_I(sbi);
 981	struct seg_entry *sentry;
 982	int ret;
 983
 984	down_read(&sit_i->sentry_lock);
 985	sentry = get_seg_entry(sbi, segno);
 986	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
 987	up_read(&sit_i->sentry_lock);
 988	return ret;
 989}
 990
 991/*
 992 * This function compares the node address found in the summary with that
 993 * in the NAT. If it is valid, the node is migrated with cold status;
 994 * otherwise (an invalid node) it is ignored.
 995 */
 996static int gc_node_segment(struct f2fs_sb_info *sbi,
 997		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 998{
 999	struct f2fs_summary *entry;
1000	block_t start_addr;
1001	int off;
1002	int phase = 0;
1003	bool fggc = (gc_type == FG_GC);
1004	int submitted = 0;
1005	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1006
1007	start_addr = START_BLOCK(sbi, segno);
1008
1009next_step:
1010	entry = sum;
1011
1012	if (fggc && phase == 2)
1013		atomic_inc(&sbi->wb_sync_req[NODE]);
1014
1015	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1016		nid_t nid = le32_to_cpu(entry->nid);
1017		struct page *node_page;
1018		struct node_info ni;
1019		int err;
1020
1021		/* stop BG_GC if there are not enough free sections. */
1022		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
1023			return submitted;
1024
1025		if (check_valid_map(sbi, segno, off) == 0)
1026			continue;
1027
1028		if (phase == 0) {
1029			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1030							META_NAT, true);
1031			continue;
1032		}
1033
1034		if (phase == 1) {
1035			f2fs_ra_node_page(sbi, nid);
1036			continue;
1037		}
1038
1039		/* phase == 2 */
1040		node_page = f2fs_get_node_page(sbi, nid);
1041		if (IS_ERR(node_page))
1042			continue;
1043
1044		/* block may become invalid during f2fs_get_node_page */
1045		if (check_valid_map(sbi, segno, off) == 0) {
1046			f2fs_put_page(node_page, 1);
1047			continue;
1048		}
1049
1050		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
1051			f2fs_put_page(node_page, 1);
1052			continue;
1053		}
1054
1055		if (ni.blk_addr != start_addr + off) {
1056			f2fs_put_page(node_page, 1);
1057			continue;
1058		}
1059
1060		err = f2fs_move_node_page(node_page, gc_type);
1061		if (!err && gc_type == FG_GC)
1062			submitted++;
1063		stat_inc_node_blk_count(sbi, 1, gc_type);
1064	}
1065
1066	if (++phase < 3)
1067		goto next_step;
1068
1069	if (fggc)
1070		atomic_dec(&sbi->wb_sync_req[NODE]);
1071	return submitted;
1072}
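/*
 * Editorial summary of the phases above: phase 0 only issues readahead
 * for the NAT blocks covering each nid in the summary, phase 1 issues
 * readahead for the node pages themselves, and phase 2 re-validates
 * every block and calls f2fs_move_node_page(). Sweeping the segment
 * three times lets the cheap readahead passes batch the I/O before any
 * node page is actually migrated.
 */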
1073
1074/*
1075 * Calculate the start block index for the given node offset.
1076 * Be careful: the caller must pass a node offset that refers only to
1077 * direct node blocks. If a node offset pointing to another node block
1078 * type, such as an indirect or double indirect node block, is given,
1079 * it is a caller's bug.
1080 */
1081block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
1082{
1083	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
1084	unsigned int bidx;
1085
1086	if (node_ofs == 0)
1087		return 0;
1088
1089	if (node_ofs <= 2) {
1090		bidx = node_ofs - 1;
1091	} else if (node_ofs <= indirect_blks) {
1092		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
1093
1094		bidx = node_ofs - 2 - dec;
1095	} else {
1096		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
1097
1098		bidx = node_ofs - 5 - dec;
1099	}
1100	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
1101}
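/*
 * Worked example (editorial; assumes the default geometry of 923 data
 * addresses in the inode and 1018 per direct node block): node_ofs 1 is
 * the file's first direct node, so bidx = 0 and the result is 923;
 * node_ofs 4 is the first direct node under the first indirect node, so
 * dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0, bidx = 4 - 2 - 0 = 2, and
 * the result is 2 * 1018 + 923 = 2959.
 */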
1102
1103static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1104		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
1105{
1106	struct page *node_page;
1107	nid_t nid;
1108	unsigned int ofs_in_node, max_addrs, base;
1109	block_t source_blkaddr;
1110
1111	nid = le32_to_cpu(sum->nid);
1112	ofs_in_node = le16_to_cpu(sum->ofs_in_node);
1113
1114	node_page = f2fs_get_node_page(sbi, nid);
1115	if (IS_ERR(node_page))
1116		return false;
1117
1118	if (f2fs_get_node_info(sbi, nid, dni, false)) {
1119		f2fs_put_page(node_page, 1);
1120		return false;
1121	}
1122
1123	if (sum->version != dni->version) {
1124		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1125			  __func__);
1126		set_sbi_flag(sbi, SBI_NEED_FSCK);
1127	}
1128
1129	if (f2fs_check_nid_range(sbi, dni->ino)) {
1130		f2fs_put_page(node_page, 1);
1131		return false;
1132	}
1133
1134	if (IS_INODE(node_page)) {
1135		base = offset_in_addr(F2FS_INODE(node_page));
1136		max_addrs = DEF_ADDRS_PER_INODE;
1137	} else {
1138		base = 0;
1139		max_addrs = DEF_ADDRS_PER_BLOCK;
1140	}
1141
1142	if (base + ofs_in_node >= max_addrs) {
1143		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1144			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
1145		f2fs_put_page(node_page, 1);
1146		return false;
1147	}
1148
1149	*nofs = ofs_of_node(node_page);
1150	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
1151	f2fs_put_page(node_page, 1);
1152
1153	if (source_blkaddr != blkaddr) {
1154#ifdef CONFIG_F2FS_CHECK_FS
1155		unsigned int segno = GET_SEGNO(sbi, blkaddr);
1156		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1157
1158		if (unlikely(check_valid_map(sbi, segno, offset))) {
1159			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1160				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1161					 blkaddr, source_blkaddr, segno);
1162				set_sbi_flag(sbi, SBI_NEED_FSCK);
1163			}
1164		}
1165#endif
1166		return false;
1167	}
1168	return true;
1169}
1170
1171static int ra_data_block(struct inode *inode, pgoff_t index)
1172{
1173	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1174	struct address_space *mapping = inode->i_mapping;
1175	struct dnode_of_data dn;
1176	struct page *page;
1177	struct f2fs_io_info fio = {
1178		.sbi = sbi,
1179		.ino = inode->i_ino,
1180		.type = DATA,
1181		.temp = COLD,
1182		.op = REQ_OP_READ,
1183		.op_flags = 0,
1184		.encrypted_page = NULL,
1185		.in_list = 0,
1186	};
1187	int err;
1188
1189	page = f2fs_grab_cache_page(mapping, index, true);
1190	if (!page)
1191		return -ENOMEM;
1192
1193	if (f2fs_lookup_read_extent_cache_block(inode, index,
1194						&dn.data_blkaddr)) {
1195		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1196						DATA_GENERIC_ENHANCE_READ))) {
1197			err = -EFSCORRUPTED;
1198			goto put_page;
1199		}
1200		goto got_it;
1201	}
1202
1203	set_new_dnode(&dn, inode, NULL, NULL, 0);
1204	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1205	if (err)
1206		goto put_page;
1207	f2fs_put_dnode(&dn);
1208
1209	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1210		err = -ENOENT;
1211		goto put_page;
1212	}
1213	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1214						DATA_GENERIC_ENHANCE))) {
1215		err = -EFSCORRUPTED;
1216		goto put_page;
1217	}
1218got_it:
1219	/* read page */
1220	fio.page = page;
1221	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1222
1223	/*
1224	 * don't cache encrypted data in the meta inode until the previous
1225	 * dirty data has been written back, to avoid racing between GC and flush.
1226	 */
1227	f2fs_wait_on_page_writeback(page, DATA, true, true);
1228
1229	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1230
1231	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1232					dn.data_blkaddr,
1233					FGP_LOCK | FGP_CREAT, GFP_NOFS);
1234	if (!fio.encrypted_page) {
1235		err = -ENOMEM;
1236		goto put_page;
1237	}
1238
1239	err = f2fs_submit_page_bio(&fio);
1240	if (err)
1241		goto put_encrypted_page;
1242	f2fs_put_page(fio.encrypted_page, 0);
1243	f2fs_put_page(page, 1);
1244
1245	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1246	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1247
1248	return 0;
1249put_encrypted_page:
1250	f2fs_put_page(fio.encrypted_page, 1);
1251put_page:
1252	f2fs_put_page(page, 1);
1253	return err;
1254}
1255
1256/*
1257 * Move data block via META_MAPPING while keeping locked data page.
1258 * This can be used to move blocks, aka LBAs, directly on disk.
1259 */
1260static int move_data_block(struct inode *inode, block_t bidx,
1261				int gc_type, unsigned int segno, int off)
1262{
1263	struct f2fs_io_info fio = {
1264		.sbi = F2FS_I_SB(inode),
1265		.ino = inode->i_ino,
1266		.type = DATA,
1267		.temp = COLD,
1268		.op = REQ_OP_READ,
1269		.op_flags = 0,
1270		.encrypted_page = NULL,
1271		.in_list = 0,
1272	};
1273	struct dnode_of_data dn;
1274	struct f2fs_summary sum;
1275	struct node_info ni;
1276	struct page *page, *mpage;
1277	block_t newaddr;
1278	int err = 0;
1279	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1280	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1281				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1282				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
1283
1284	/* do not read out */
1285	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
1286	if (!page)
1287		return -ENOMEM;
1288
1289	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1290		err = -ENOENT;
1291		goto out;
1292	}
1293
1294	err = f2fs_gc_pinned_control(inode, gc_type, segno);
1295	if (err)
1296		goto out;
1297
1298	set_new_dnode(&dn, inode, NULL, NULL, 0);
1299	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
1300	if (err)
1301		goto out;
1302
1303	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1304		ClearPageUptodate(page);
1305		err = -ENOENT;
1306		goto put_out;
1307	}
1308
1309	/*
1310	 * don't cache encrypted data in the meta inode until the previous
1311	 * dirty data has been written back, to avoid racing between GC and flush.
1312	 */
1313	f2fs_wait_on_page_writeback(page, DATA, true, true);
1314
1315	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1316
1317	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1318	if (err)
1319		goto put_out;
1320
1321	/* read page */
1322	fio.page = page;
1323	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1324
1325	if (lfs_mode)
1326		f2fs_down_write(&fio.sbi->io_order_lock);
1327
1328	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1329					fio.old_blkaddr, false);
1330	if (!mpage) {
1331		err = -ENOMEM;
1332		goto up_out;
1333	}
1334
1335	fio.encrypted_page = mpage;
1336
1337	/* read source block in mpage */
1338	if (!PageUptodate(mpage)) {
1339		err = f2fs_submit_page_bio(&fio);
1340		if (err) {
1341			f2fs_put_page(mpage, 1);
1342			goto up_out;
1343		}
1344
1345		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1346							F2FS_BLKSIZE);
1347		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1348							F2FS_BLKSIZE);
1349
1350		lock_page(mpage);
1351		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1352						!PageUptodate(mpage))) {
1353			err = -EIO;
1354			f2fs_put_page(mpage, 1);
1355			goto up_out;
1356		}
1357	}
1358
1359	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1360
1361	/* allocate block address */
1362	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1363				&sum, type, NULL);
1364	if (err) {
1365		f2fs_put_page(mpage, 1);
1366		/* filesystem should shut down, no need to recover the block */
1367		goto up_out;
1368	}
1369
1370	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1371				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
1372	if (!fio.encrypted_page) {
1373		err = -ENOMEM;
1374		f2fs_put_page(mpage, 1);
1375		goto recover_block;
1376	}
1377
1378	/* write target block */
1379	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
1380	memcpy(page_address(fio.encrypted_page),
1381				page_address(mpage), PAGE_SIZE);
1382	f2fs_put_page(mpage, 1);
1383
1384	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
1385
1386	set_page_dirty(fio.encrypted_page);
1387	if (clear_page_dirty_for_io(fio.encrypted_page))
1388		dec_page_count(fio.sbi, F2FS_DIRTY_META);
1389
1390	set_page_writeback(fio.encrypted_page);
1391
1392	fio.op = REQ_OP_WRITE;
1393	fio.op_flags = REQ_SYNC;
1394	fio.new_blkaddr = newaddr;
1395	f2fs_submit_page_write(&fio);
1396
1397	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
1398
1399	f2fs_update_data_blkaddr(&dn, newaddr);
1400	set_inode_flag(inode, FI_APPEND_WRITE);
1401
1402	f2fs_put_page(fio.encrypted_page, 1);
1403recover_block:
1404	if (err)
1405		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1406							true, true, true);
1407up_out:
1408	if (lfs_mode)
1409		f2fs_up_write(&fio.sbi->io_order_lock);
1410put_out:
1411	f2fs_put_dnode(&dn);
1412out:
1413	f2fs_put_page(page, 1);
1414	return err;
1415}
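/*
 * Editorial summary of the move pipeline above: (1) read the source
 * block into the meta-inode page cache, bypassing decryption; (2)
 * reserve a new block address with f2fs_allocate_data_block(); (3) copy
 * the raw bytes into a meta page for the new address and write it out
 * with REQ_SYNC; (4) repoint the dnode via f2fs_update_data_blkaddr().
 * Since only ciphertext moves through memory, this path can migrate
 * encrypted, compressed or verity blocks without post-read processing.
 */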
1416
1417static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
1418							unsigned int segno, int off)
1419{
1420	struct page *page;
1421	int err = 0;
1422
1423	page = f2fs_get_lock_data_page(inode, bidx, true);
1424	if (IS_ERR(page))
1425		return PTR_ERR(page);
1426
1427	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1428		err = -ENOENT;
1429		goto out;
1430	}
1431
1432	err = f2fs_gc_pinned_control(inode, gc_type, segno);
1433	if (err)
1434		goto out;
1435
1436	if (gc_type == BG_GC) {
1437		if (PageWriteback(page)) {
1438			err = -EAGAIN;
1439			goto out;
1440		}
1441		set_page_dirty(page);
1442		set_page_private_gcing(page);
1443	} else {
1444		struct f2fs_io_info fio = {
1445			.sbi = F2FS_I_SB(inode),
1446			.ino = inode->i_ino,
1447			.type = DATA,
1448			.temp = COLD,
1449			.op = REQ_OP_WRITE,
1450			.op_flags = REQ_SYNC,
1451			.old_blkaddr = NULL_ADDR,
1452			.page = page,
1453			.encrypted_page = NULL,
1454			.need_lock = LOCK_REQ,
1455			.io_type = FS_GC_DATA_IO,
1456		};
1457		bool is_dirty = PageDirty(page);
1458
1459retry:
1460		f2fs_wait_on_page_writeback(page, DATA, true, true);
1461
1462		set_page_dirty(page);
1463		if (clear_page_dirty_for_io(page)) {
1464			inode_dec_dirty_pages(inode);
1465			f2fs_remove_dirty_inode(inode);
1466		}
1467
1468		set_page_private_gcing(page);
1469
1470		err = f2fs_do_write_data_page(&fio);
1471		if (err) {
1472			clear_page_private_gcing(page);
1473			if (err == -ENOMEM) {
1474				memalloc_retry_wait(GFP_NOFS);
1475				goto retry;
1476			}
1477			if (is_dirty)
1478				set_page_dirty(page);
1479		}
1480	}
1481out:
1482	f2fs_put_page(page, 1);
1483	return err;
1484}
1485
1486/*
1487 * This function tries to get the parent node of the victim data block, and
1488 * checks the data block's validity. If the block is valid, it is copied with
1489 * cold status and the parent node is modified.
1490 * If the parent node is not valid or the data block address is different,
1491 * the victim data block is ignored.
1492 */
1493static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1494		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1495		bool force_migrate)
1496{
1497	struct super_block *sb = sbi->sb;
1498	struct f2fs_summary *entry;
1499	block_t start_addr;
1500	int off;
1501	int phase = 0;
1502	int submitted = 0;
1503	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1504
1505	start_addr = START_BLOCK(sbi, segno);
1506
1507next_step:
1508	entry = sum;
1509
1510	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1511		struct page *data_page;
1512		struct inode *inode;
1513		struct node_info dni; /* dnode info for the data */
1514		unsigned int ofs_in_node, nofs;
1515		block_t start_bidx;
1516		nid_t nid = le32_to_cpu(entry->nid);
1517
1518		/*
1519		 * stop BG_GC if there are not enough free sections.
1520		 * Or, stop GC if the segment becomes fully valid due to a
1521		 * race condition with SSR block allocation.
1522		 */
1523		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1524			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
1525							CAP_BLKS_PER_SEC(sbi)))
1526			return submitted;
1527
1528		if (check_valid_map(sbi, segno, off) == 0)
1529			continue;
1530
1531		if (phase == 0) {
1532			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1533							META_NAT, true);
1534			continue;
1535		}
1536
1537		if (phase == 1) {
1538			f2fs_ra_node_page(sbi, nid);
1539			continue;
1540		}
1541
1542		/* Get an inode by ino while checking its validity */
1543		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1544			continue;
1545
1546		if (phase == 2) {
1547			f2fs_ra_node_page(sbi, dni.ino);
1548			continue;
1549		}
1550
1551		ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1552
1553		if (phase == 3) {
1554			int err;
1555
1556			inode = f2fs_iget(sb, dni.ino);
1557			if (IS_ERR(inode))
1558				continue;
1559
1560			if (is_bad_inode(inode) ||
1561					special_file(inode->i_mode)) {
1562				iput(inode);
1563				continue;
1564			}
1565
1566			err = f2fs_gc_pinned_control(inode, gc_type, segno);
1567			if (err == -EAGAIN) {
1568				iput(inode);
1569				return submitted;
1570			}
1571
1572			if (!f2fs_down_write_trylock(
1573				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1574				iput(inode);
1575				sbi->skipped_gc_rwsem++;
1576				continue;
1577			}
1578
1579			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1580								ofs_in_node;
1581
1582			if (f2fs_post_read_required(inode)) {
1583				int err = ra_data_block(inode, start_bidx);
1584
1585				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1586				if (err) {
1587					iput(inode);
1588					continue;
1589				}
1590				add_gc_inode(gc_list, inode);
1591				continue;
1592			}
1593
1594			data_page = f2fs_get_read_data_page(inode, start_bidx,
1595							REQ_RAHEAD, true, NULL);
1596			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1597			if (IS_ERR(data_page)) {
1598				iput(inode);
1599				continue;
1600			}
1601
1602			f2fs_put_page(data_page, 0);
1603			add_gc_inode(gc_list, inode);
1604			continue;
1605		}
1606
1607		/* phase 4 */
1608		inode = find_gc_inode(gc_list, dni.ino);
1609		if (inode) {
1610			struct f2fs_inode_info *fi = F2FS_I(inode);
1611			bool locked = false;
1612			int err;
1613
1614			if (S_ISREG(inode->i_mode)) {
1615				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
1616					sbi->skipped_gc_rwsem++;
1617					continue;
1618				}
1619				if (!f2fs_down_write_trylock(
1620						&fi->i_gc_rwsem[READ])) {
1621					sbi->skipped_gc_rwsem++;
1622					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1623					continue;
1624				}
1625				locked = true;
1626
1627				/* wait for all inflight aio data */
1628				inode_dio_wait(inode);
1629			}
1630
1631			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1632								+ ofs_in_node;
1633			if (f2fs_post_read_required(inode))
1634				err = move_data_block(inode, start_bidx,
1635							gc_type, segno, off);
1636			else
1637				err = move_data_page(inode, start_bidx, gc_type,
1638								segno, off);
1639
1640			if (!err && (gc_type == FG_GC ||
1641					f2fs_post_read_required(inode)))
1642				submitted++;
1643
1644			if (locked) {
1645				f2fs_up_write(&fi->i_gc_rwsem[READ]);
1646				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1647			}
1648
1649			stat_inc_data_blk_count(sbi, 1, gc_type);
1650		}
1651	}
1652
1653	if (++phase < 5)
1654		goto next_step;
1655
1656	return submitted;
1657}
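/*
 * Editorial summary of the five phases above: phases 0 and 1 prefetch
 * NAT blocks and node pages, phase 2 checks via is_alive() that each
 * block is still referenced and prefetches the owning inode's node page,
 * phase 3 igets the inode, readaheads its data and queues it on gc_list,
 * and phase 4 performs the actual move, move_data_block() for
 * post-read-required inodes and move_data_page() otherwise, holding the
 * inode's i_gc_rwsem to fence off concurrent writers.
 */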
1658
1659static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1660			int gc_type)
1661{
1662	struct sit_info *sit_i = SIT_I(sbi);
1663	int ret;
1664
1665	down_write(&sit_i->sentry_lock);
1666	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
1667	up_write(&sit_i->sentry_lock);
1668	return ret;
1669}
1670
1671static int do_garbage_collect(struct f2fs_sb_info *sbi,
1672				unsigned int start_segno,
1673				struct gc_inode_list *gc_list, int gc_type,
1674				bool force_migrate)
1675{
1676	struct page *sum_page;
1677	struct f2fs_summary_block *sum;
1678	struct blk_plug plug;
1679	unsigned int segno = start_segno;
1680	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
1681	int seg_freed = 0, migrated = 0;
1682	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1683						SUM_TYPE_DATA : SUM_TYPE_NODE;
1684	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
1685	int submitted = 0;
1686
1687	if (__is_large_section(sbi))
1688		end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
1689
1690	/*
1691	 * zone-capacity can be less than zone-size in zoned devices,
1692	 * resulting in less than expected usable segments in the zone,
1693	 * calculate the end segno in the zone which can be garbage collected
1694	 */
1695	if (f2fs_sb_has_blkzoned(sbi))
1696		end_segno -= SEGS_PER_SEC(sbi) -
1697					f2fs_usable_segs_in_sec(sbi, segno);
1698
1699	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1700
1701	/* readahead multiple SSA blocks that have contiguous addresses */
1702	if (__is_large_section(sbi))
1703		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1704					end_segno - segno, META_SSA, true);
1705
1706	/* reference all summary pages */
1707	while (segno < end_segno) {
1708		sum_page = f2fs_get_sum_page(sbi, segno++);
1709		if (IS_ERR(sum_page)) {
1710			int err = PTR_ERR(sum_page);
1711
1712			end_segno = segno - 1;
1713			for (segno = start_segno; segno < end_segno; segno++) {
1714				sum_page = find_get_page(META_MAPPING(sbi),
1715						GET_SUM_BLOCK(sbi, segno));
1716				f2fs_put_page(sum_page, 0);
1717				f2fs_put_page(sum_page, 0);
1718			}
1719			return err;
1720		}
1721		unlock_page(sum_page);
1722	}
1723
1724	blk_start_plug(&plug);
1725
1726	for (segno = start_segno; segno < end_segno; segno++) {
1727
1728		/* find segment summary of victim */
1729		sum_page = find_get_page(META_MAPPING(sbi),
1730					GET_SUM_BLOCK(sbi, segno));
1731		f2fs_put_page(sum_page, 0);
1732
1733		if (get_valid_blocks(sbi, segno, false) == 0)
1734			goto freed;
1735		if (gc_type == BG_GC && __is_large_section(sbi) &&
1736				migrated >= sbi->migration_granularity)
1737			goto skip;
1738		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1739			goto skip;
1740
1741		sum = page_address(sum_page);
1742		if (type != GET_SUM_TYPE((&sum->footer))) {
1743			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1744				 segno, type, GET_SUM_TYPE((&sum->footer)));
1745			set_sbi_flag(sbi, SBI_NEED_FSCK);
1746			f2fs_stop_checkpoint(sbi, false,
1747				STOP_CP_REASON_CORRUPTED_SUMMARY);
1748			goto skip;
1749		}
1750
1751		/*
1752		 * this is to avoid deadlock:
1753		 * - lock_page(sum_page)         - f2fs_replace_block
1754		 *  - check_valid_map()            - down_write(sentry_lock)
1755		 *   - down_read(sentry_lock)     - change_curseg()
1756		 *                                  - lock_page(sum_page)
1757		 */
1758		if (type == SUM_TYPE_NODE)
1759			submitted += gc_node_segment(sbi, sum->entries, segno,
1760								gc_type);
1761		else
1762			submitted += gc_data_segment(sbi, sum->entries, gc_list,
1763							segno, gc_type,
1764							force_migrate);
1765
1766		stat_inc_gc_seg_count(sbi, data_type, gc_type);
1767		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1768		migrated++;
1769
1770freed:
1771		if (gc_type == FG_GC &&
1772				get_valid_blocks(sbi, segno, false) == 0)
1773			seg_freed++;
1774
1775		if (__is_large_section(sbi))
1776			sbi->next_victim_seg[gc_type] =
1777				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
1778skip:
1779		f2fs_put_page(sum_page, 0);
1780	}
1781
1782	if (submitted)
1783		f2fs_submit_merged_write(sbi, data_type);
1784
1785	blk_finish_plug(&plug);
1786
1787	if (migrated)
1788		stat_inc_gc_sec_count(sbi, data_type, gc_type);
1789
1790	return seg_freed;
1791}
1792
1793int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1794{
1795	int gc_type = gc_control->init_gc_type;
1796	unsigned int segno = gc_control->victim_segno;
1797	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
1798	int ret = 0;
1799	struct cp_control cpc;
1800	struct gc_inode_list gc_list = {
1801		.ilist = LIST_HEAD_INIT(gc_list.ilist),
1802		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1803	};
1804	unsigned int skipped_round = 0, round = 0;
1805	unsigned int upper_secs;
1806
1807	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1808				gc_control->nr_free_secs,
1809				get_pages(sbi, F2FS_DIRTY_NODES),
1810				get_pages(sbi, F2FS_DIRTY_DENTS),
1811				get_pages(sbi, F2FS_DIRTY_IMETA),
1812				free_sections(sbi),
1813				free_segments(sbi),
1814				reserved_segments(sbi),
1815				prefree_segments(sbi));
1816
1817	cpc.reason = __get_cp_reason(sbi);
1818gc_more:
1819	sbi->skipped_gc_rwsem = 0;
1820	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1821		ret = -EINVAL;
1822		goto stop;
1823	}
1824	if (unlikely(f2fs_cp_error(sbi))) {
1825		ret = -EIO;
1826		goto stop;
1827	}
1828
1829	/* Let's run FG_GC if we don't have enough space. */
1830	if (has_not_enough_free_secs(sbi, 0, 0)) {
1831		gc_type = FG_GC;
1832
1833		/*
1834		 * For example, if there are many prefree_segments below the given
1835		 * threshold, we can free them by writing a checkpoint. Then, we
1836		 * secure free segments that don't need FG_GC any more.
1837		 */
1838		if (prefree_segments(sbi)) {
1839			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1840			ret = f2fs_write_checkpoint(sbi, &cpc);
1841			if (ret)
1842				goto stop;
1843			/* Reset due to checkpoint */
1844			sec_freed = 0;
1845		}
1846	}
1847
1848	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1849	if (gc_type == BG_GC && gc_control->no_bg_gc) {
1850		ret = -EINVAL;
1851		goto stop;
1852	}
1853retry:
1854	ret = __get_victim(sbi, &segno, gc_type);
1855	if (ret) {
1856		/* allow searching for a victim in sections that have pinned data */
1857		if (ret == -ENODATA && gc_type == FG_GC &&
1858				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1859			f2fs_unpin_all_sections(sbi, false);
1860			goto retry;
1861		}
1862		goto stop;
1863	}
1864
1865	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1866				gc_control->should_migrate_blocks);
1867	if (seg_freed < 0)
1868		goto stop;
1869
1870	total_freed += seg_freed;
1871
1872	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
1873		sec_freed++;
1874		total_sec_freed++;
1875	}
1876
1877	if (gc_type == FG_GC) {
1878		sbi->cur_victim_sec = NULL_SEGNO;
1879
1880		if (has_enough_free_secs(sbi, sec_freed, 0)) {
1881			if (!gc_control->no_bg_gc &&
1882			    total_sec_freed < gc_control->nr_free_secs)
1883				goto go_gc_more;
1884			goto stop;
1885		}
1886		if (sbi->skipped_gc_rwsem)
1887			skipped_round++;
1888		round++;
1889		if (skipped_round > MAX_SKIP_GC_COUNT &&
1890				skipped_round * 2 >= round) {
1891			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1892			ret = f2fs_write_checkpoint(sbi, &cpc);
1893			goto stop;
1894		}
1895	} else if (has_enough_free_secs(sbi, 0, 0)) {
1896		goto stop;
1897	}
1898
1899	__get_secs_required(sbi, NULL, &upper_secs, NULL);
1900
1901	/*
1902	 * Write checkpoint to reclaim prefree segments.
1903	 * We need three extra sections for the writer's data/node/dentry.
1904	 */
1905	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1906				prefree_segments(sbi)) {
1907		stat_inc_cp_call_count(sbi, TOTAL_CALL);
1908		ret = f2fs_write_checkpoint(sbi, &cpc);
1909		if (ret)
1910			goto stop;
1911		/* Reset due to checkpoint */
1912		sec_freed = 0;
1913	}
1914go_gc_more:
1915	segno = NULL_SEGNO;
1916	goto gc_more;
1917
1918stop:
1919	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1920	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1921
1922	if (gc_type == FG_GC)
1923		f2fs_unpin_all_sections(sbi, true);
1924
1925	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1926				get_pages(sbi, F2FS_DIRTY_NODES),
1927				get_pages(sbi, F2FS_DIRTY_DENTS),
1928				get_pages(sbi, F2FS_DIRTY_IMETA),
1929				free_sections(sbi),
1930				free_segments(sbi),
1931				reserved_segments(sbi),
1932				prefree_segments(sbi));
1933
1934	f2fs_up_write(&sbi->gc_lock);
1935
1936	put_gc_inode(&gc_list);
1937
1938	if (gc_control->err_gc_skipped && !ret)
1939		ret = total_sec_freed ? 0 : -EAGAIN;
1940	return ret;
1941}
1942
1943int __init f2fs_create_garbage_collection_cache(void)
1944{
1945	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1946					sizeof(struct victim_entry));
1947	return victim_entry_slab ? 0 : -ENOMEM;
1948}
1949
1950void f2fs_destroy_garbage_collection_cache(void)
1951{
1952	kmem_cache_destroy(victim_entry_slab);
1953}
1954
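/*
 * Set up ATGC state: enable it only when the ATGC mount option is set and
 * the filesystem has aged past the default age threshold, then reset the
 * candidate rb-tree/list and load the default tuning parameters.
 */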
1955static void init_atgc_management(struct f2fs_sb_info *sbi)
1956{
1957	struct atgc_management *am = &sbi->am;
1958
1959	if (test_opt(sbi, ATGC) &&
1960		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1961		am->atgc_enabled = true;
1962
1963	am->root = RB_ROOT_CACHED;
1964	INIT_LIST_HEAD(&am->victim_list);
1965	am->victim_count = 0;
1966
1967	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1968	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1969	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
1970	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
1971}
1972
1973void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1974{
1975	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1976
1977	/* serve the warm/cold data area from the slower device */
1978	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1979		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1980				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1981
1982	init_atgc_management(sbi);
1983}
1984
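/*
 * Garbage-collect every section in [start_seg, end_seg] with FG_GC. In a
 * real run, any section that still has valid blocks afterwards fails the
 * range with -EAGAIN; in a dry run, stop once dry_run_sections sections
 * have been observed to migrate cleanly.
 */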
1985int f2fs_gc_range(struct f2fs_sb_info *sbi,
1986		unsigned int start_seg, unsigned int end_seg,
1987		bool dry_run, unsigned int dry_run_sections)
1988{
1989	unsigned int segno;
1990	unsigned int gc_secs = dry_run_sections;
1991
1992	if (unlikely(f2fs_cp_error(sbi)))
1993		return -EIO;
1994
1995	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
1996		struct gc_inode_list gc_list = {
1997			.ilist = LIST_HEAD_INIT(gc_list.ilist),
1998			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1999		};
2000
2001		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
2002						dry_run_sections == 0);
2003		put_gc_inode(&gc_list);
2004
2005		if (!dry_run && get_valid_blocks(sbi, segno, true))
2006			return -EAGAIN;
2007		if (dry_run && dry_run_sections &&
2008		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2009			break;
2010
2011		if (fatal_signal_pending(current))
2012			return -ERESTARTSYS;
2013	}
2014
2015	return 0;
2016}
2017
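/*
 * Carve the last "secs" sections out of the main area for shrinking:
 * temporarily drop MAIN_SECS so the allocator cannot use the range, clear
 * victim hints that point into it, move the current segments out, GC the
 * remaining valid blocks, and finally checkpoint so the range becomes
 * truly free. MAIN_SECS is restored before returning.
 */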
2018static int free_segment_range(struct f2fs_sb_info *sbi,
2019				unsigned int secs, bool dry_run)
2020{
2021	unsigned int next_inuse, start, end;
2022	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2023	int gc_mode, gc_type;
2024	int err = 0;
2025	int type;
2026
2027	/* Force block allocation for GC */
2028	MAIN_SECS(sbi) -= secs;
2029	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
2030	end = MAIN_SEGS(sbi) - 1;
2031
2032	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2033	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
2034		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2035			SIT_I(sbi)->last_victim[gc_mode] = 0;
2036
2037	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2038		if (sbi->next_victim_seg[gc_type] >= start)
2039			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2040	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2041
2042	/* Move out cursegs from the target range */
2043	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
2044		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2045		if (err)
2046			goto out;
2047	}
2048
2049	/* do GC to move out valid blocks in the range */
2050	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2051	if (err || dry_run)
2052		goto out;
2053
2054	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2055	err = f2fs_write_checkpoint(sbi, &cpc);
2056	if (err)
2057		goto out;
2058
2059	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2060	if (next_inuse <= end) {
2061		f2fs_err(sbi, "segno %u should be free but still inuse!",
2062			 next_inuse);
2063		f2fs_bug_on(sbi, 1);
2064	}
2065out:
2066	MAIN_SECS(sbi) += secs;
2067	return err;
2068}
2069
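/*
 * Apply a resize delta of "secs" sections (negative when shrinking) to the
 * on-disk superblock counters under sb_lock; update_fs_metadata() below is
 * the in-memory counterpart.
 */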
2070static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2071{
2072	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2073	int section_count;
2074	int segment_count;
2075	int segment_count_main;
2076	long long block_count;
2077	int segs = secs * SEGS_PER_SEC(sbi);
2078
2079	f2fs_down_write(&sbi->sb_lock);
2080
2081	section_count = le32_to_cpu(raw_sb->section_count);
2082	segment_count = le32_to_cpu(raw_sb->segment_count);
2083	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2084	block_count = le64_to_cpu(raw_sb->block_count);
2085
2086	raw_sb->section_count = cpu_to_le32(section_count + secs);
2087	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2088	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2089	raw_sb->block_count = cpu_to_le64(block_count +
2090			(long long)SEGS_TO_BLKS(sbi, segs));
2091	if (f2fs_is_multi_device(sbi)) {
2092		int last_dev = sbi->s_ndevs - 1;
2093		int dev_segs =
2094			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2095
2096		raw_sb->devs[last_dev].total_segments =
2097						cpu_to_le32(dev_segs + segs);
2098	}
2099
2100	f2fs_up_write(&sbi->sb_lock);
2101}
2102
2103static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2104{
2105	int segs = secs * SEGS_PER_SEC(sbi);
2106	long long blks = SEGS_TO_BLKS(sbi, segs);
2107	long long user_block_count =
2108				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2109
2110	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2111	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2112	MAIN_SECS(sbi) += secs;
2113	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2114	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2115	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2116
2117	if (f2fs_is_multi_device(sbi)) {
2118		int last_dev = sbi->s_ndevs - 1;
2119
2120		FDEV(last_dev).total_segments =
2121				(int)FDEV(last_dev).total_segments + segs;
2122		FDEV(last_dev).end_blk =
2123				(long long)FDEV(last_dev).end_blk + blks;
2124#ifdef CONFIG_BLK_DEV_ZONED
2125		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2126					div_u64(blks, sbi->blocks_per_blkz);
2127#endif
2128	}
2129}
2130
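/*
 * Shrink the filesystem to block_count blocks: validate section alignment
 * and free space, dry-run free_segment_range() first, then freeze the
 * superblock and do the real run, committing the shrunk superblock and a
 * checkpoint, with rollback of the metadata updates on failure.
 */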
2131int f2fs_resize_fs(struct file *filp, __u64 block_count)
2132{
2133	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2134	__u64 old_block_count, shrunk_blocks;
2135	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2136	unsigned int secs;
2137	int err = 0;
2138	__u32 rem;
2139
2140	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2141	if (block_count > old_block_count)
2142		return -EINVAL;
2143
2144	if (f2fs_is_multi_device(sbi)) {
2145		int last_dev = sbi->s_ndevs - 1;
2146		__u64 last_segs = FDEV(last_dev).total_segments;
2147
2148		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
2149								old_block_count)
2150			return -EINVAL;
2151	}
2152
2153	/* the new fs size should be aligned to the section size */
2154	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2155	if (rem)
2156		return -EINVAL;
2157
2158	if (block_count == old_block_count)
2159		return 0;
2160
2161	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2162		f2fs_err(sbi, "Should run fsck to repair first.");
2163		return -EFSCORRUPTED;
2164	}
2165
2166	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2167		f2fs_err(sbi, "Checkpoint should be enabled.");
2168		return -EINVAL;
2169	}
2170
2171	err = mnt_want_write_file(filp);
2172	if (err)
2173		return err;
2174
2175	shrunk_blocks = old_block_count - block_count;
2176	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2177
2178	/* stop other GC */
2179	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2180		err = -EAGAIN;
2181		goto out_drop_write;
2182	}
2183
2184	/* stop CP to protect MAIN_SEC in free_segment_range */
2185	f2fs_lock_op(sbi);
2186
2187	spin_lock(&sbi->stat_lock);
2188	if (shrunk_blocks + valid_user_blocks(sbi) +
2189		sbi->current_reserved_blocks + sbi->unusable_block_count +
2190		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2191		err = -ENOSPC;
2192	spin_unlock(&sbi->stat_lock);
2193
2194	if (err)
2195		goto out_unlock;
2196
2197	err = free_segment_range(sbi, secs, true);
2198
2199out_unlock:
2200	f2fs_unlock_op(sbi);
2201	f2fs_up_write(&sbi->gc_lock);
2202out_drop_write:
2203	mnt_drop_write_file(filp);
2204	if (err)
2205		return err;
2206
2207	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2208	if (err)
2209		return err;
2210
2211	if (f2fs_readonly(sbi->sb)) {
2212		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2213		if (err)
2214			return err;
2215		return -EROFS;
2216	}
2217
2218	f2fs_down_write(&sbi->gc_lock);
2219	f2fs_down_write(&sbi->cp_global_sem);
2220
2221	spin_lock(&sbi->stat_lock);
2222	if (shrunk_blocks + valid_user_blocks(sbi) +
2223		sbi->current_reserved_blocks + sbi->unusable_block_count +
2224		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2225		err = -ENOSPC;
2226	else
2227		sbi->user_block_count -= shrunk_blocks;
2228	spin_unlock(&sbi->stat_lock);
2229	if (err)
2230		goto out_err;
2231
2232	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2233	err = free_segment_range(sbi, secs, false);
2234	if (err)
2235		goto recover_out;
2236
2237	update_sb_metadata(sbi, -secs);
2238
2239	err = f2fs_commit_super(sbi, false);
2240	if (err) {
2241		update_sb_metadata(sbi, secs);
2242		goto recover_out;
2243	}
2244
2245	update_fs_metadata(sbi, -secs);
2246	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2247	set_sbi_flag(sbi, SBI_IS_DIRTY);
2248
2249	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2250	err = f2fs_write_checkpoint(sbi, &cpc);
2251	if (err) {
2252		update_fs_metadata(sbi, secs);
2253		update_sb_metadata(sbi, secs);
2254		f2fs_commit_super(sbi, false);
2255	}
2256recover_out:
2257	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2258	if (err) {
2259		set_sbi_flag(sbi, SBI_NEED_FSCK);
2260		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2261
2262		spin_lock(&sbi->stat_lock);
2263		sbi->user_block_count += shrunk_blocks;
2264		spin_unlock(&sbi->stat_lock);
2265	}
2266out_err:
2267	f2fs_up_write(&sbi->cp_global_sem);
2268	f2fs_up_write(&sbi->gc_lock);
2269	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2270	return err;
2271}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/gc.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/fs.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/f2fs_fs.h>
  12#include <linux/kthread.h>
  13#include <linux/delay.h>
  14#include <linux/freezer.h>
  15#include <linux/sched/signal.h>
  16#include <linux/random.h>
  17#include <linux/sched/mm.h>
  18
  19#include "f2fs.h"
  20#include "node.h"
  21#include "segment.h"
  22#include "gc.h"
  23#include "iostat.h"
  24#include <trace/events/f2fs.h>
  25
  26static struct kmem_cache *victim_entry_slab;
  27
  28static unsigned int count_bits(const unsigned long *addr,
  29				unsigned int offset, unsigned int len);
  30
  31static int gc_thread_func(void *data)
  32{
  33	struct f2fs_sb_info *sbi = data;
  34	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
  35	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
  36	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
  37	unsigned int wait_ms;
  38	struct f2fs_gc_control gc_control = {
  39		.victim_segno = NULL_SEGNO,
  40		.should_migrate_blocks = false,
  41		.err_gc_skipped = false };
  42
  43	wait_ms = gc_th->min_sleep_time;
  44
  45	set_freezable();
  46	do {
  47		bool sync_mode, foreground = false;
  48
  49		wait_event_freezable_timeout(*wq,
  50				kthread_should_stop() ||
  51				waitqueue_active(fggc_wq) ||
  52				gc_th->gc_wake,
  53				msecs_to_jiffies(wait_ms));
  54
  55		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
  56			foreground = true;
  57
  58		/* give it a try one time */
  59		if (gc_th->gc_wake)
  60			gc_th->gc_wake = false;
  61
  62		if (f2fs_readonly(sbi->sb)) {
  63			stat_other_skip_bggc_count(sbi);
  64			continue;
  65		}
  66		if (kthread_should_stop())
  67			break;
  68
  69		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
  70			increase_sleep_time(gc_th, &wait_ms);
  71			stat_other_skip_bggc_count(sbi);
  72			continue;
  73		}
  74
  75		if (time_to_inject(sbi, FAULT_CHECKPOINT))
  76			f2fs_stop_checkpoint(sbi, false,
  77					STOP_CP_REASON_FAULT_INJECT);
  78
  79		if (!sb_start_write_trylock(sbi->sb)) {
  80			stat_other_skip_bggc_count(sbi);
  81			continue;
  82		}
  83
  84		gc_control.one_time = false;
  85
  86		/*
  87		 * [GC triggering condition]
  88		 * 0. GC is not conducted currently.
  89		 * 1. There are enough dirty segments.
  90		 * 2. IO subsystem is idle by checking the # of writeback pages.
  91		 * 3. IO subsystem is idle by checking the # of requests in
  92		 *    bdev's request list.
  93		 *
  94		 * Note) We have to avoid triggering GCs frequently.
  95		 * Because it is possible that some segments can be
  96		 * invalidated soon after by user update or deletion.
  97		 * So, I'd like to wait some time to collect dirty segments.
  98		 */
  99		if (sbi->gc_mode == GC_URGENT_HIGH ||
 100				sbi->gc_mode == GC_URGENT_MID) {
 101			wait_ms = gc_th->urgent_sleep_time;
 102			f2fs_down_write(&sbi->gc_lock);
 103			goto do_gc;
 104		}
 105
 106		if (foreground) {
 107			f2fs_down_write(&sbi->gc_lock);
 108			goto do_gc;
 109		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 110			stat_other_skip_bggc_count(sbi);
 111			goto next;
 112		}
 113
 114		if (!is_idle(sbi, GC_TIME)) {
 115			increase_sleep_time(gc_th, &wait_ms);
 116			f2fs_up_write(&sbi->gc_lock);
 117			stat_io_skip_bggc_count(sbi);
 118			goto next;
 119		}
 120
 121		if (f2fs_sb_has_blkzoned(sbi)) {
 122			if (has_enough_free_blocks(sbi,
 123				gc_th->no_zoned_gc_percent)) {
 124				wait_ms = gc_th->no_gc_sleep_time;
 125				f2fs_up_write(&sbi->gc_lock);
 126				goto next;
 127			}
 128			if (wait_ms == gc_th->no_gc_sleep_time)
 129				wait_ms = gc_th->max_sleep_time;
 130		}
 131
 132		if (need_to_boost_gc(sbi)) {
 133			decrease_sleep_time(gc_th, &wait_ms);
 134			if (f2fs_sb_has_blkzoned(sbi))
 135				gc_control.one_time = true;
 136		} else {
 137			increase_sleep_time(gc_th, &wait_ms);
 138		}
 139do_gc:
 140		stat_inc_gc_call_count(sbi, foreground ?
 141					FOREGROUND : BACKGROUND);
 142
 143		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
 144				gc_control.one_time;
 145
 146		/* foreground GC was triggered via f2fs_balance_fs() */
 147		if (foreground)
 148			sync_mode = false;
 149
 150		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
 151		gc_control.no_bg_gc = foreground;
 152		gc_control.nr_free_secs = foreground ? 1 : 0;
 153
 154		/* if return value is not zero, no victim was selected */
 155		if (f2fs_gc(sbi, &gc_control)) {
 156			/* don't let foreground GC disturb wait_ms */
 157			if (!foreground)
 158				wait_ms = gc_th->no_gc_sleep_time;
 159		} else {
 160			/* reset wait_ms to default sleep time */
 161			if (wait_ms == gc_th->no_gc_sleep_time)
 162				wait_ms = gc_th->min_sleep_time;
 163		}
 164
 165		if (foreground)
 166			wake_up_all(&gc_th->fggc_wq);
 167
 168		trace_f2fs_background_gc(sbi->sb, wait_ms,
 169				prefree_segments(sbi), free_segments(sbi));
 170
 171		/* balance f2fs metadata periodically */
 172		f2fs_balance_fs_bg(sbi, true);
 173next:
 174		if (sbi->gc_mode != GC_NORMAL) {
 175			spin_lock(&sbi->gc_remaining_trials_lock);
 176			if (sbi->gc_remaining_trials) {
 177				sbi->gc_remaining_trials--;
 178				if (!sbi->gc_remaining_trials)
 179					sbi->gc_mode = GC_NORMAL;
 180			}
 181			spin_unlock(&sbi->gc_remaining_trials_lock);
 182		}
 183		sb_end_write(sbi->sb);
 184
 185	} while (!kthread_should_stop());
 186	return 0;
 187}
 188
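/*
 * Spawn the background GC thread. Zoned devices get their own, more
 * aggressive sleep-time defaults plus free-block thresholds that gate and
 * boost zoned GC; on failure the allocation is rolled back.
 */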
 189int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
 190{
 191	struct f2fs_gc_kthread *gc_th;
 192	dev_t dev = sbi->sb->s_bdev->bd_dev;
 193
 194	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
 195	if (!gc_th)
 196		return -ENOMEM;
 197
 198	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
 199	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
 200
 201	if (f2fs_sb_has_blkzoned(sbi)) {
 202		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
 203		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
 204		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
 205		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
 206		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
 207	} else {
 208		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
 209		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
 210		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
 211		gc_th->no_zoned_gc_percent = 0;
 212		gc_th->boost_zoned_gc_percent = 0;
 213	}
 214
 215	gc_th->gc_wake = false;
 216
 217	sbi->gc_thread = gc_th;
 218	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
 219	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
 220	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
 221			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 222	if (IS_ERR(gc_th->f2fs_gc_task)) {
 223		int err = PTR_ERR(gc_th->f2fs_gc_task);
 224
 225		kfree(gc_th);
 226		sbi->gc_thread = NULL;
 227		return err;
 228	}
 229
 230	return 0;
 231}
 232
 233void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 234{
 235	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 236
 237	if (!gc_th)
 238		return;
 239	kthread_stop(gc_th->f2fs_gc_task);
 240	wake_up_all(&gc_th->fggc_wq);
 241	kfree(gc_th);
 242	sbi->gc_thread = NULL;
 243}
 244
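/*
 * Pick the victim-selection policy for this GC pass: background GC prefers
 * age-threshold GC when ATGC is enabled (else cost-benefit), foreground GC
 * uses greedy, and a user-set gc_mode overrides both.
 */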
 245static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
 246{
 247	int gc_mode;
 248
 249	if (gc_type == BG_GC) {
 250		if (sbi->am.atgc_enabled)
 251			gc_mode = GC_AT;
 252		else
 253			gc_mode = GC_CB;
 254	} else {
 255		gc_mode = GC_GREEDY;
 256	}
 257
 258	switch (sbi->gc_mode) {
 259	case GC_IDLE_CB:
 260	case GC_URGENT_LOW:
 261	case GC_URGENT_MID:
 262		gc_mode = GC_CB;
 263		break;
 264	case GC_IDLE_GREEDY:
 265	case GC_URGENT_HIGH:
 266		gc_mode = GC_GREEDY;
 267		break;
 268	case GC_IDLE_AT:
 269		gc_mode = GC_AT;
 270		break;
 271	}
 272
 273	return gc_mode;
 274}
 275
 276static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 277			int type, struct victim_sel_policy *p)
 278{
 279	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 280
 281	if (p->alloc_mode == SSR) {
 282		p->gc_mode = GC_GREEDY;
 283		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 284		p->max_search = dirty_i->nr_dirty[type];
 285		p->ofs_unit = 1;
 286	} else if (p->alloc_mode == AT_SSR) {
 287		p->gc_mode = GC_GREEDY;
 288		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 289		p->max_search = dirty_i->nr_dirty[type];
 290		p->ofs_unit = 1;
 291	} else {
 292		p->gc_mode = select_gc_type(sbi, gc_type);
 293		p->ofs_unit = SEGS_PER_SEC(sbi);
 294		if (__is_large_section(sbi)) {
 295			p->dirty_bitmap = dirty_i->dirty_secmap;
 296			p->max_search = count_bits(p->dirty_bitmap,
 297						0, MAIN_SECS(sbi));
 298		} else {
 299			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
 300			p->max_search = dirty_i->nr_dirty[DIRTY];
 301		}
 302	}
 303
 304	/*
 305	 * Adjust the candidate range; all dirty segments should be
 306	 * selected for foreground GC and urgent GC cases.
 307	 */
 308	if (gc_type != FG_GC &&
 309			(sbi->gc_mode != GC_URGENT_HIGH) &&
 310			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
 311			p->max_search > sbi->max_victim_search)
 312		p->max_search = sbi->max_victim_search;
 313
 314	/* let's scan the beginning of the hot/small space first. */
 315	if (f2fs_need_rand_seg(sbi))
 316		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
 317						SEGS_PER_SEC(sbi));
 318	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
 319		p->offset = 0;
 320	else
 321		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
 322}
 323
 324static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
 325				struct victim_sel_policy *p)
 326{
 327	/* SSR allocates in a segment unit */
 328	if (p->alloc_mode == SSR)
 329		return BLKS_PER_SEG(sbi);
 330	else if (p->alloc_mode == AT_SSR)
 331		return UINT_MAX;
 332
 333	/* LFS */
 334	if (p->gc_mode == GC_GREEDY)
 335		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
 336	else if (p->gc_mode == GC_CB)
 337		return UINT_MAX;
 338	else if (p->gc_mode == GC_AT)
 339		return UINT_MAX;
 340	else /* No other gc_mode */
 341		return 0;
 342}
 343
 344static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
 345{
 346	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 347	unsigned int secno;
 348
 349	/*
 350	 * If the gc_type is FG_GC, we can reuse victim segments
 351	 * that background GC selected earlier.
 352	 * Those segments are guaranteed to have few valid blocks.
 353	 */
 354	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
 355		if (sec_usage_check(sbi, secno))
 356			continue;
 357		clear_bit(secno, dirty_i->victim_secmap);
 358		return GET_SEG_FROM_SEC(sbi, secno);
 359	}
 360	return NULL_SEGNO;
 361}
 362
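/*
 * Cost-benefit victim cost: u is the section utilization in percent and
 * age is the normalized mtime (0..100, older is larger). The benefit
 * 100 * (100 - u) * age / (100 + u) is subtracted from UINT_MAX so that a
 * smaller return value means a better victim.
 */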
 363static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 364{
 365	struct sit_info *sit_i = SIT_I(sbi);
 366	unsigned long long mtime = 0;
 367	unsigned int vblocks;
 368	unsigned char age = 0;
 369	unsigned char u;
 370	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
 371
 372	mtime = f2fs_get_section_mtime(sbi, segno);
 373	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
 374	vblocks = get_valid_blocks(sbi, segno, true);
 375	vblocks = div_u64(vblocks, usable_segs_per_sec);
 376
 377	u = BLKS_TO_SEGS(sbi, vblocks * 100);
 378
 379	/* Handle the case where the system time was changed by the user */
 380	if (mtime < sit_i->min_mtime)
 381		sit_i->min_mtime = mtime;
 382	if (mtime > sit_i->max_mtime)
 383		sit_i->max_mtime = mtime;
 384	if (sit_i->max_mtime != sit_i->min_mtime)
 385		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
 386				sit_i->max_mtime - sit_i->min_mtime);
 387
 388	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 389}
 390
 391static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
 392			unsigned int segno, struct victim_sel_policy *p)
 393{
 394	if (p->alloc_mode == SSR)
 395		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 396
 397	if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
 398		CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
 399		100))
 400		return UINT_MAX;
 401
 402	/* alloc_mode == LFS */
 403	if (p->gc_mode == GC_GREEDY)
 404		return get_valid_blocks(sbi, segno, true);
 405	else if (p->gc_mode == GC_CB)
 406		return get_cb_cost(sbi, segno);
 407
 408	f2fs_bug_on(sbi, 1);
 409	return 0;
 410}
 411
 412static unsigned int count_bits(const unsigned long *addr,
 413				unsigned int offset, unsigned int len)
 414{
 415	unsigned int end = offset + len, sum = 0;
 416
 417	while (offset < end) {
 418		if (test_bit(offset++, addr))
 419			++sum;
 420	}
 421	return sum;
 422}
 423
 424static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
 425				struct rb_root_cached *root)
 426{
 427#ifdef CONFIG_F2FS_CHECK_FS
 428	struct rb_node *cur = rb_first_cached(root), *next;
 429	struct victim_entry *cur_ve, *next_ve;
 430
 431	while (cur) {
 432		next = rb_next(cur);
 433		if (!next)
 434			return true;
 435
 436		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
 437		next_ve = rb_entry(next, struct victim_entry, rb_node);
 438
 439		if (cur_ve->mtime > next_ve->mtime) {
 440			f2fs_info(sbi, "broken victim_rbtree, "
 441				"cur_mtime(%llu) next_mtime(%llu)",
 442				cur_ve->mtime, next_ve->mtime);
 443			return false;
 444		}
 445		cur = next;
 446	}
 447#endif
 448	return true;
 449}
 450
 451static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
 452					unsigned long long mtime)
 453{
 454	struct atgc_management *am = &sbi->am;
 455	struct rb_node *node = am->root.rb_root.rb_node;
 456	struct victim_entry *ve = NULL;
 457
 458	while (node) {
 459		ve = rb_entry(node, struct victim_entry, rb_node);
 460
 461		if (mtime < ve->mtime)
 462			node = node->rb_left;
 463		else
 464			node = node->rb_right;
 465	}
 466	return ve;
 467}
 468
 469static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
 470		unsigned long long mtime, unsigned int segno)
 471{
 472	struct atgc_management *am = &sbi->am;
 473	struct victim_entry *ve;
 474
 475	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
 476
 477	ve->mtime = mtime;
 478	ve->segno = segno;
 479
 480	list_add_tail(&ve->list, &am->victim_list);
 481	am->victim_count++;
 482
 483	return ve;
 484}
 485
 486static void __insert_victim_entry(struct f2fs_sb_info *sbi,
 487				unsigned long long mtime, unsigned int segno)
 488{
 489	struct atgc_management *am = &sbi->am;
 490	struct rb_root_cached *root = &am->root;
 491	struct rb_node **p = &root->rb_root.rb_node;
 492	struct rb_node *parent = NULL;
 493	struct victim_entry *ve;
 494	bool left_most = true;
 495
 496	/* look up rb tree to find parent node */
 497	while (*p) {
 498		parent = *p;
 499		ve = rb_entry(parent, struct victim_entry, rb_node);
 500
 501		if (mtime < ve->mtime) {
 502			p = &(*p)->rb_left;
 503		} else {
 504			p = &(*p)->rb_right;
 505			left_most = false;
 506		}
 507	}
 508
 509	ve = __create_victim_entry(sbi, mtime, segno);
 510
 511	rb_link_node(&ve->rb_node, parent, p);
 512	rb_insert_color_cached(&ve->rb_node, root, left_most);
 513}
 514
 515static void add_victim_entry(struct f2fs_sb_info *sbi,
 516				struct victim_sel_policy *p, unsigned int segno)
 517{
 518	struct sit_info *sit_i = SIT_I(sbi);
 519	unsigned long long mtime = 0;
 520
 521	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 522		if (p->gc_mode == GC_AT &&
 523			get_valid_blocks(sbi, segno, true) == 0)
 524			return;
 525	}
 526
 527	mtime = f2fs_get_section_mtime(sbi, segno);
 528	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
 529
 530	/* Handle the case where the system time was changed by the user */
 531	if (mtime < sit_i->min_mtime)
 532		sit_i->min_mtime = mtime;
 533	if (mtime > sit_i->max_mtime)
 534		sit_i->max_mtime = mtime;
 535	if (mtime < sit_i->dirty_min_mtime)
 536		sit_i->dirty_min_mtime = mtime;
 537	if (mtime > sit_i->dirty_max_mtime)
 538		sit_i->dirty_max_mtime = mtime;
 539
 540	/* don't choose a young section as a candidate */
 541	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
 542		return;
 543
 544	__insert_victim_entry(sbi, mtime, segno);
 545}
 546
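/*
 * ATGC victim lookup: walk candidates from the oldest mtime and score each
 * one by a weighted sum of normalized age (age_weight percent) and free
 * space in the section (the remaining weight); the smallest resulting cost
 * wins. At most dirty_threshold candidates are examined per pass.
 */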
 547static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
 548						struct victim_sel_policy *p)
 549{
 550	struct sit_info *sit_i = SIT_I(sbi);
 551	struct atgc_management *am = &sbi->am;
 552	struct rb_root_cached *root = &am->root;
 553	struct rb_node *node;
 554	struct victim_entry *ve;
 555	unsigned long long total_time;
 556	unsigned long long age, u, accu;
 557	unsigned long long max_mtime = sit_i->dirty_max_mtime;
 558	unsigned long long min_mtime = sit_i->dirty_min_mtime;
 559	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
 560	unsigned int vblocks;
 561	unsigned int dirty_threshold = max(am->max_candidate_count,
 562					am->candidate_ratio *
 563					am->victim_count / 100);
 564	unsigned int age_weight = am->age_weight;
 565	unsigned int cost;
 566	unsigned int iter = 0;
 567
 568	if (max_mtime < min_mtime)
 569		return;
 570
 571	max_mtime += 1;
 572	total_time = max_mtime - min_mtime;
 573
 574	accu = div64_u64(ULLONG_MAX, total_time);
 575	accu = min_t(unsigned long long, div_u64(accu, 100),
 576					DEFAULT_ACCURACY_CLASS);
 577
 578	node = rb_first_cached(root);
 579next:
 580	ve = rb_entry_safe(node, struct victim_entry, rb_node);
 581	if (!ve)
 582		return;
 583
 584	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
 585		goto skip;
 586
 587	/* age = 10000 * x% * 60 */
 588	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
 589								age_weight;
 590
 591	vblocks = get_valid_blocks(sbi, ve->segno, true);
 592	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
 593
 594	/* u = 10000 * x% * 40 */
 595	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
 596							(100 - age_weight);
 597
 598	f2fs_bug_on(sbi, age + u >= UINT_MAX);
 599
 600	cost = UINT_MAX - (age + u);
 601	iter++;
 602
 603	if (cost < p->min_cost ||
 604			(cost == p->min_cost && age > p->oldest_age)) {
 605		p->min_cost = cost;
 606		p->oldest_age = age;
 607		p->min_segno = ve->segno;
 608	}
 609skip:
 610	if (iter < dirty_threshold) {
 611		node = rb_next(node);
 612		goto next;
 613	}
 614}
 615
 616/*
 617 * select candidates around source section in range of
 618 * [target - dirty_threshold, target + dirty_threshold]
 619 */
 620static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
 621						struct victim_sel_policy *p)
 622{
 623	struct sit_info *sit_i = SIT_I(sbi);
 624	struct atgc_management *am = &sbi->am;
 625	struct victim_entry *ve;
 626	unsigned long long age;
 627	unsigned long long max_mtime = sit_i->dirty_max_mtime;
 628	unsigned long long min_mtime = sit_i->dirty_min_mtime;
 629	unsigned int vblocks;
 630	unsigned int dirty_threshold = max(am->max_candidate_count,
 631					am->candidate_ratio *
 632					am->victim_count / 100);
 633	unsigned int cost, iter;
 634	int stage = 0;
 635
 636	if (max_mtime < min_mtime)
 637		return;
 638	max_mtime += 1;
 639next_stage:
 640	iter = 0;
 641	ve = __lookup_victim_entry(sbi, p->age);
 642next_node:
 643	if (!ve) {
 644		if (stage++ == 0)
 645			goto next_stage;
 646		return;
 647	}
 648
 649	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
 650		goto skip_node;
 651
 652	age = max_mtime - ve->mtime;
 653
 654	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
 655	f2fs_bug_on(sbi, !vblocks);
 656
 657	/* rare case */
 658	if (vblocks == BLKS_PER_SEG(sbi))
 659		goto skip_node;
 660
 661	iter++;
 662
 663	age = max_mtime - abs(p->age - age);
 664	cost = UINT_MAX - vblocks;
 665
 666	if (cost < p->min_cost ||
 667			(cost == p->min_cost && age > p->oldest_age)) {
 668		p->min_cost = cost;
 669		p->oldest_age = age;
 670		p->min_segno = ve->segno;
 671	}
 672skip_node:
 673	if (iter < dirty_threshold) {
 674		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
 675					rb_next(&ve->rb_node),
 676					struct victim_entry, rb_node);
 677		goto next_node;
 678	}
 679
 680	if (stage++ == 0)
 681		goto next_stage;
 682}
 683
 684static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
 685						struct victim_sel_policy *p)
 686{
 687	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
 688
 689	if (p->gc_mode == GC_AT)
 690		atgc_lookup_victim(sbi, p);
 691	else if (p->alloc_mode == AT_SSR)
 692		atssr_lookup_victim(sbi, p);
 693	else
 694		f2fs_bug_on(sbi, 1);
 695}
 696
 697static void release_victim_entry(struct f2fs_sb_info *sbi)
 698{
 699	struct atgc_management *am = &sbi->am;
 700	struct victim_entry *ve, *tmp;
 701
 702	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
 703		list_del(&ve->list);
 704		kmem_cache_free(victim_entry_slab, ve);
 705		am->victim_count--;
 706	}
 707
 708	am->root = RB_ROOT_CACHED;
 709
 710	f2fs_bug_on(sbi, am->victim_count);
 711	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
 712}
 713
 714static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
 715{
 716	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 717	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 718
 719	if (!dirty_i->enable_pin_section)
 720		return false;
 721	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
 722		dirty_i->pinned_secmap_cnt++;
 723	return true;
 724}
 725
 726static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
 727{
 728	return dirty_i->pinned_secmap_cnt;
 729}
 730
 731static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
 732						unsigned int secno)
 733{
 734	return dirty_i->enable_pin_section &&
 735		f2fs_pinned_section_exists(dirty_i) &&
 736		test_bit(secno, dirty_i->pinned_secmap);
 737}
 738
 739static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
 740{
 741	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
 742
 743	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
 744		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
 745		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
 746	}
 747	DIRTY_I(sbi)->enable_pin_section = enable;
 748}
 749
 750static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
 751							unsigned int segno)
 752{
 753	if (!f2fs_is_pinned_file(inode))
 754		return 0;
 755	if (gc_type != FG_GC)
 756		return -EBUSY;
 757	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
 758		f2fs_pin_file_control(inode, true);
 759	return -EAGAIN;
 760}
 761
 762/*
 763 * This function is called from two paths.
 764 * One is garbage collection and the other is SSR segment selection.
 765 * When it is called during GC, it just gets a victim segment and does
 766 * not remove it from the dirty seglist.
 767 * When it is called from SSR segment selection, it finds the segment that
 768 * has the minimum valid blocks and removes it from the dirty seglist.
 769 */
 770int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
 771			int gc_type, int type, char alloc_mode,
 772			unsigned long long age, bool one_time)
 773{
 774	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 775	struct sit_info *sm = SIT_I(sbi);
 776	struct victim_sel_policy p;
 777	unsigned int secno, last_victim;
 778	unsigned int last_segment;
 779	unsigned int nsearched;
 780	bool is_atgc;
 781	int ret = 0;
 782
 783	mutex_lock(&dirty_i->seglist_lock);
 784	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
 785
 786	p.alloc_mode = alloc_mode;
 787	p.age = age;
 788	p.age_threshold = sbi->am.age_threshold;
 789	p.one_time_gc = one_time;
 790
 791retry:
 792	select_policy(sbi, gc_type, type, &p);
 793	p.min_segno = NULL_SEGNO;
 794	p.oldest_age = 0;
 795	p.min_cost = get_max_cost(sbi, &p);
 796
 797	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
 798	nsearched = 0;
 799
 800	if (is_atgc)
 801		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
 802
 803	if (*result != NULL_SEGNO) {
 804		if (!get_valid_blocks(sbi, *result, false)) {
 805			ret = -ENODATA;
 806			goto out;
 807		}
 808
 809		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 810			ret = -EBUSY;
 811		else
 812			p.min_segno = *result;
 813		goto out;
 814	}
 815
 816	ret = -ENODATA;
 817	if (p.max_search == 0)
 818		goto out;
 819
 820	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
 821		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
 822			p.min_segno = sbi->next_victim_seg[BG_GC];
 823			*result = p.min_segno;
 824			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
 825			goto got_result;
 826		}
 827		if (gc_type == FG_GC &&
 828				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
 829			p.min_segno = sbi->next_victim_seg[FG_GC];
 830			*result = p.min_segno;
 831			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 832			goto got_result;
 833		}
 834	}
 835
 836	last_victim = sm->last_victim[p.gc_mode];
 837	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 838		p.min_segno = check_bg_victims(sbi);
 839		if (p.min_segno != NULL_SEGNO)
 840			goto got_it;
 841	}
 842
 843	while (1) {
 844		unsigned long cost, *dirty_bitmap;
 845		unsigned int unit_no, segno;
 846
 847		dirty_bitmap = p.dirty_bitmap;
 848		unit_no = find_next_bit(dirty_bitmap,
 849				last_segment / p.ofs_unit,
 850				p.offset / p.ofs_unit);
 851		segno = unit_no * p.ofs_unit;
 852		if (segno >= last_segment) {
 853			if (sm->last_victim[p.gc_mode]) {
 854				last_segment =
 855					sm->last_victim[p.gc_mode];
 856				sm->last_victim[p.gc_mode] = 0;
 857				p.offset = 0;
 858				continue;
 859			}
 860			break;
 861		}
 862
 863		p.offset = segno + p.ofs_unit;
 864		nsearched++;
 865
 866#ifdef CONFIG_F2FS_CHECK_FS
 867		/*
 868		 * Skip selecting an invalid segno (one that failed the block
 869		 * validity check during GC) to avoid an endless GC loop in
 870		 * such cases.
 871		 */
 872		if (test_bit(segno, sm->invalid_segmap))
 873			goto next;
 874#endif
 875
 876		secno = GET_SEC_FROM_SEG(sbi, segno);
 877
 878		if (sec_usage_check(sbi, secno))
 879			goto next;
 880
 881		/* Don't touch checkpointed data */
 882		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 883			if (p.alloc_mode == LFS) {
 884				/*
 885				 * LFS is set to find source section during GC.
 886				 * The victim should have no checkpointed data.
 887				 */
 888				if (get_ckpt_valid_blocks(sbi, segno, true))
 889					goto next;
 890			} else {
 891				/*
 892				 * SSR | AT_SSR are set to find a target segment
 893				 * for writes, which may be filled by checkpointed
 894				 * and newly written blocks.
 895				 */
 896				if (!f2fs_segment_has_free_slot(sbi, segno))
 897					goto next;
 898			}
 899		}
 900
 901		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
 902			goto next;
 903
 904		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
 905			goto next;
 906
 907		if (is_atgc) {
 908			add_victim_entry(sbi, &p, segno);
 909			goto next;
 910		}
 911
 912		cost = get_gc_cost(sbi, segno, &p);
 913
 914		if (p.min_cost > cost) {
 915			p.min_segno = segno;
 916			p.min_cost = cost;
 917		}
 918next:
 919		if (nsearched >= p.max_search) {
 920			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
 921				sm->last_victim[p.gc_mode] =
 922					last_victim + p.ofs_unit;
 923			else
 924				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
 925			sm->last_victim[p.gc_mode] %=
 926				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
 927			break;
 928		}
 929	}
 930
 931	/* get victim for GC_AT/AT_SSR */
 932	if (is_atgc) {
 933		lookup_victim_by_age(sbi, &p);
 934		release_victim_entry(sbi);
 935	}
 936
 937	if (is_atgc && p.min_segno == NULL_SEGNO &&
 938			sm->elapsed_time < p.age_threshold) {
 939		p.age_threshold = 0;
 940		goto retry;
 941	}
 942
 943	if (p.min_segno != NULL_SEGNO) {
 944got_it:
 945		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 946got_result:
 947		if (p.alloc_mode == LFS) {
 948			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 949			if (gc_type == FG_GC)
 950				sbi->cur_victim_sec = secno;
 951			else
 952				set_bit(secno, dirty_i->victim_secmap);
 953		}
 954		ret = 0;
 955
 956	}
 957out:
 958	if (p.min_segno != NULL_SEGNO)
 959		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 960				sbi->cur_victim_sec,
 961				prefree_segments(sbi), free_segments(sbi));
 962	mutex_unlock(&dirty_i->seglist_lock);
 963
 964	return ret;
 965}
 966
 967static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 968{
 969	struct inode_entry *ie;
 970
 971	ie = radix_tree_lookup(&gc_list->iroot, ino);
 972	if (ie)
 973		return ie->inode;
 974	return NULL;
 975}
 976
 977static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 978{
 979	struct inode_entry *new_ie;
 980
 981	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
 982		iput(inode);
 983		return;
 984	}
 985	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
 986					GFP_NOFS, true, NULL);
 987	new_ie->inode = inode;
 988
 989	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
 990	list_add_tail(&new_ie->list, &gc_list->ilist);
 991}
 992
 993static void put_gc_inode(struct gc_inode_list *gc_list)
 994{
 995	struct inode_entry *ie, *next_ie;
 996
 997	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
 998		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
 999		iput(ie->inode);
1000		list_del(&ie->list);
1001		kmem_cache_free(f2fs_inode_entry_slab, ie);
1002	}
1003}
1004
1005static int check_valid_map(struct f2fs_sb_info *sbi,
1006				unsigned int segno, int offset)
1007{
1008	struct sit_info *sit_i = SIT_I(sbi);
1009	struct seg_entry *sentry;
1010	int ret;
1011
1012	down_read(&sit_i->sentry_lock);
1013	sentry = get_seg_entry(sbi, segno);
1014	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
1015	up_read(&sit_i->sentry_lock);
1016	return ret;
1017}
1018
1019/*
1020 * This function compares the node address recorded in the summary with the
1021 * one in the NAT. If it is valid, the node is migrated with cold status;
1022 * otherwise (an invalid node) it is ignored.
1023 */
1024static int gc_node_segment(struct f2fs_sb_info *sbi,
1025		struct f2fs_summary *sum, unsigned int segno, int gc_type)
1026{
1027	struct f2fs_summary *entry;
1028	block_t start_addr;
1029	int off;
1030	int phase = 0;
1031	bool fggc = (gc_type == FG_GC);
1032	int submitted = 0;
1033	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1034
1035	start_addr = START_BLOCK(sbi, segno);
1036
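	/*
	 * Three passes over the segment's summary entries: phase 0 reads
	 * ahead the NAT blocks, phase 1 reads ahead the node pages, and
	 * phase 2 re-checks validity and actually migrates each node page.
	 */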
1037next_step:
1038	entry = sum;
1039
1040	if (fggc && phase == 2)
1041		atomic_inc(&sbi->wb_sync_req[NODE]);
1042
1043	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1044		nid_t nid = le32_to_cpu(entry->nid);
1045		struct page *node_page;
1046		struct node_info ni;
1047		int err;
1048
1049		/* stop BG_GC if there are not enough free sections. */
1050		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
1051			return submitted;
1052
1053		if (check_valid_map(sbi, segno, off) == 0)
1054			continue;
1055
1056		if (phase == 0) {
1057			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1058							META_NAT, true);
1059			continue;
1060		}
1061
1062		if (phase == 1) {
1063			f2fs_ra_node_page(sbi, nid);
1064			continue;
1065		}
1066
1067		/* phase == 2 */
1068		node_page = f2fs_get_node_page(sbi, nid);
1069		if (IS_ERR(node_page))
1070			continue;
1071
1072		/* block may become invalid during f2fs_get_node_page */
1073		if (check_valid_map(sbi, segno, off) == 0) {
1074			f2fs_put_page(node_page, 1);
1075			continue;
1076		}
1077
1078		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
1079			f2fs_put_page(node_page, 1);
1080			continue;
1081		}
1082
1083		if (ni.blk_addr != start_addr + off) {
1084			f2fs_put_page(node_page, 1);
1085			continue;
1086		}
1087
1088		err = f2fs_move_node_page(node_page, gc_type);
1089		if (!err && gc_type == FG_GC)
1090			submitted++;
1091		stat_inc_node_blk_count(sbi, 1, gc_type);
1092	}
1093
1094	if (++phase < 3)
1095		goto next_step;
1096
1097	if (fggc)
1098		atomic_dec(&sbi->wb_sync_req[NODE]);
1099	return submitted;
1100}
1101
1102/*
1103 * Calculate the start block index for the given node offset.
1104 * Be careful: the caller must pass a node offset that indicates a direct
1105 * node block only. Passing an offset that points to any other type of
1106 * node block, such as an indirect or double indirect node block, is a
1107 * caller's bug.
1108 */
1109block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
1110{
1111	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
1112	unsigned int bidx;
1113
1114	if (node_ofs == 0)
1115		return 0;
1116
1117	if (node_ofs <= 2) {
1118		bidx = node_ofs - 1;
1119	} else if (node_ofs <= indirect_blks) {
1120		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
1121
1122		bidx = node_ofs - 2 - dec;
1123	} else {
1124		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
1125
1126		bidx = node_ofs - 5 - dec;
1127	}
1128	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
1129}
1130
1131static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1132		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
1133{
1134	struct page *node_page;
1135	nid_t nid;
1136	unsigned int ofs_in_node, max_addrs, base;
1137	block_t source_blkaddr;
1138
1139	nid = le32_to_cpu(sum->nid);
1140	ofs_in_node = le16_to_cpu(sum->ofs_in_node);
1141
1142	node_page = f2fs_get_node_page(sbi, nid);
1143	if (IS_ERR(node_page))
1144		return false;
1145
1146	if (f2fs_get_node_info(sbi, nid, dni, false)) {
1147		f2fs_put_page(node_page, 1);
1148		return false;
1149	}
1150
1151	if (sum->version != dni->version) {
1152		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1153			  __func__);
1154		set_sbi_flag(sbi, SBI_NEED_FSCK);
1155	}
1156
1157	if (f2fs_check_nid_range(sbi, dni->ino)) {
1158		f2fs_put_page(node_page, 1);
1159		return false;
1160	}
1161
1162	if (IS_INODE(node_page)) {
1163		base = offset_in_addr(F2FS_INODE(node_page));
1164		max_addrs = DEF_ADDRS_PER_INODE;
1165	} else {
1166		base = 0;
1167		max_addrs = DEF_ADDRS_PER_BLOCK;
1168	}
1169
1170	if (base + ofs_in_node >= max_addrs) {
1171		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1172			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
1173		f2fs_put_page(node_page, 1);
1174		return false;
1175	}
1176
1177	*nofs = ofs_of_node(node_page);
1178	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
1179	f2fs_put_page(node_page, 1);
1180
1181	if (source_blkaddr != blkaddr) {
1182#ifdef CONFIG_F2FS_CHECK_FS
1183		unsigned int segno = GET_SEGNO(sbi, blkaddr);
1184		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1185
1186		if (unlikely(check_valid_map(sbi, segno, offset))) {
1187			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1188				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1189					 blkaddr, source_blkaddr, segno);
1190				set_sbi_flag(sbi, SBI_NEED_FSCK);
1191			}
1192		}
1193#endif
1194		return false;
1195	}
1196	return true;
1197}
1198
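/*
 * Read one data block ahead into the meta inode's page cache. This is the
 * readahead path used for inodes whose GC must go through META_MAPPING
 * (see f2fs_meta_inode_gc_required()), e.g. encrypted files.
 */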
1199static int ra_data_block(struct inode *inode, pgoff_t index)
1200{
1201	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1202	struct address_space *mapping = f2fs_is_cow_file(inode) ?
1203				F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
1204	struct dnode_of_data dn;
1205	struct page *page;
1206	struct f2fs_io_info fio = {
1207		.sbi = sbi,
1208		.ino = inode->i_ino,
1209		.type = DATA,
1210		.temp = COLD,
1211		.op = REQ_OP_READ,
1212		.op_flags = 0,
1213		.encrypted_page = NULL,
1214		.in_list = 0,
1215	};
1216	int err;
1217
1218	page = f2fs_grab_cache_page(mapping, index, true);
1219	if (!page)
1220		return -ENOMEM;
1221
1222	if (f2fs_lookup_read_extent_cache_block(inode, index,
1223						&dn.data_blkaddr)) {
1224		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1225						DATA_GENERIC_ENHANCE_READ))) {
1226			err = -EFSCORRUPTED;
1227			goto put_page;
1228		}
1229		goto got_it;
1230	}
1231
1232	set_new_dnode(&dn, inode, NULL, NULL, 0);
1233	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1234	if (err)
1235		goto put_page;
1236	f2fs_put_dnode(&dn);
1237
1238	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1239		err = -ENOENT;
1240		goto put_page;
1241	}
1242	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1243						DATA_GENERIC_ENHANCE))) {
1244		err = -EFSCORRUPTED;
1245		goto put_page;
1246	}
1247got_it:
1248	/* read page */
1249	fio.page = page;
1250	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1251
1252	/*
1253	 * don't cache encrypted data in the meta inode until the previous dirty
1254	 * data has been written back, to avoid a race between GC and flush.
1255	 */
1256	f2fs_wait_on_page_writeback(page, DATA, true, true);
1257
1258	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1259
1260	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1261					dn.data_blkaddr,
1262					FGP_LOCK | FGP_CREAT, GFP_NOFS);
1263	if (!fio.encrypted_page) {
1264		err = -ENOMEM;
1265		goto put_page;
1266	}
1267
1268	err = f2fs_submit_page_bio(&fio);
1269	if (err)
1270		goto put_encrypted_page;
1271	f2fs_put_page(fio.encrypted_page, 0);
1272	f2fs_put_page(page, 1);
1273
1274	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1275	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1276
1277	return 0;
1278put_encrypted_page:
1279	f2fs_put_page(fio.encrypted_page, 1);
1280put_page:
1281	f2fs_put_page(page, 1);
1282	return err;
1283}
1284
1285/*
1286 * Move a data block via META_MAPPING while keeping the data page locked.
1287 * This can be used to move blocks, aka LBAs, directly on disk.
1288 */
1289static int move_data_block(struct inode *inode, block_t bidx,
1290				int gc_type, unsigned int segno, int off)
1291{
1292	struct address_space *mapping = f2fs_is_cow_file(inode) ?
1293				F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
1294	struct f2fs_io_info fio = {
1295		.sbi = F2FS_I_SB(inode),
1296		.ino = inode->i_ino,
1297		.type = DATA,
1298		.temp = COLD,
1299		.op = REQ_OP_READ,
1300		.op_flags = 0,
1301		.encrypted_page = NULL,
1302		.in_list = 0,
1303	};
1304	struct dnode_of_data dn;
1305	struct f2fs_summary sum;
1306	struct node_info ni;
1307	struct page *page, *mpage;
1308	block_t newaddr;
1309	int err = 0;
1310	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1311	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1312				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1313				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
1314
1315	/* do not read the data out; the block is moved via META_MAPPING */
1316	page = f2fs_grab_cache_page(mapping, bidx, false);
1317	if (!page)
1318		return -ENOMEM;
1319
1320	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1321		err = -ENOENT;
1322		goto out;
1323	}
1324
1325	err = f2fs_gc_pinned_control(inode, gc_type, segno);
1326	if (err)
1327		goto out;
1328
1329	set_new_dnode(&dn, inode, NULL, NULL, 0);
1330	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
1331	if (err)
1332		goto out;
1333
1334	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1335		ClearPageUptodate(page);
1336		err = -ENOENT;
1337		goto put_out;
1338	}
1339
1340	/*
1341	 * don't cache encrypted data in the meta inode until the previous dirty
1342	 * data has been written back, to avoid a race between GC and flush.
1343	 */
1344	f2fs_wait_on_page_writeback(page, DATA, true, true);
1345
1346	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1347
1348	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1349	if (err)
1350		goto put_out;
1351
1352	/* read page */
1353	fio.page = page;
1354	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1355
1356	if (lfs_mode)
1357		f2fs_down_write(&fio.sbi->io_order_lock);
1358
1359	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1360					fio.old_blkaddr, false);
1361	if (!mpage) {
1362		err = -ENOMEM;
1363		goto up_out;
1364	}
1365
1366	fio.encrypted_page = mpage;
1367
1368	/* read source block in mpage */
1369	if (!PageUptodate(mpage)) {
1370		err = f2fs_submit_page_bio(&fio);
1371		if (err) {
1372			f2fs_put_page(mpage, 1);
1373			goto up_out;
1374		}
1375
1376		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1377							F2FS_BLKSIZE);
1378		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1379							F2FS_BLKSIZE);
1380
1381		lock_page(mpage);
1382		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1383						!PageUptodate(mpage))) {
1384			err = -EIO;
1385			f2fs_put_page(mpage, 1);
1386			goto up_out;
1387		}
1388	}
1389
1390	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1391
1392	/* allocate block address */
1393	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1394				&sum, type, NULL);
1395	if (err) {
1396		f2fs_put_page(mpage, 1);
1397		/* the filesystem should shut down; no need to recover the block */
1398		goto up_out;
1399	}
1400
1401	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1402				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
1403	if (!fio.encrypted_page) {
1404		err = -ENOMEM;
1405		f2fs_put_page(mpage, 1);
1406		goto recover_block;
1407	}
1408
1409	/* write target block */
1410	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
1411	memcpy(page_address(fio.encrypted_page),
1412				page_address(mpage), PAGE_SIZE);
1413	f2fs_put_page(mpage, 1);
1414
1415	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
1416
1417	set_page_dirty(fio.encrypted_page);
1418	if (clear_page_dirty_for_io(fio.encrypted_page))
1419		dec_page_count(fio.sbi, F2FS_DIRTY_META);
1420
1421	set_page_writeback(fio.encrypted_page);
1422
1423	fio.op = REQ_OP_WRITE;
1424	fio.op_flags = REQ_SYNC;
1425	fio.new_blkaddr = newaddr;
1426	f2fs_submit_page_write(&fio);
1427
1428	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
1429
1430	f2fs_update_data_blkaddr(&dn, newaddr);
1431	set_inode_flag(inode, FI_APPEND_WRITE);
1432
1433	f2fs_put_page(fio.encrypted_page, 1);
1434recover_block:
1435	if (err)
1436		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1437							true, true, true);
1438up_out:
1439	if (lfs_mode)
1440		f2fs_up_write(&fio.sbi->io_order_lock);
1441put_out:
1442	f2fs_put_dnode(&dn);
1443out:
1444	f2fs_put_page(page, 1);
1445	return err;
1446}
1447
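/*
 * Migrate one data block through the page cache: background GC only
 * redirties the page and tags it as gcing so writeback moves it later,
 * while foreground GC writes it out synchronously, retrying on -ENOMEM.
 */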
1448static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
1449							unsigned int segno, int off)
1450{
1451	struct page *page;
1452	int err = 0;
1453
1454	page = f2fs_get_lock_data_page(inode, bidx, true);
1455	if (IS_ERR(page))
1456		return PTR_ERR(page);
1457
1458	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1459		err = -ENOENT;
1460		goto out;
1461	}
1462
1463	err = f2fs_gc_pinned_control(inode, gc_type, segno);
1464	if (err)
1465		goto out;
1466
1467	if (gc_type == BG_GC) {
1468		if (folio_test_writeback(page_folio(page))) {
1469			err = -EAGAIN;
1470			goto out;
1471		}
1472		set_page_dirty(page);
1473		set_page_private_gcing(page);
1474	} else {
1475		struct f2fs_io_info fio = {
1476			.sbi = F2FS_I_SB(inode),
1477			.ino = inode->i_ino,
1478			.type = DATA,
1479			.temp = COLD,
1480			.op = REQ_OP_WRITE,
1481			.op_flags = REQ_SYNC,
1482			.old_blkaddr = NULL_ADDR,
1483			.page = page,
1484			.encrypted_page = NULL,
1485			.need_lock = LOCK_REQ,
1486			.io_type = FS_GC_DATA_IO,
1487		};
1488		bool is_dirty = PageDirty(page);
1489
1490retry:
1491		f2fs_wait_on_page_writeback(page, DATA, true, true);
1492
1493		set_page_dirty(page);
1494		if (clear_page_dirty_for_io(page)) {
1495			inode_dec_dirty_pages(inode);
1496			f2fs_remove_dirty_inode(inode);
1497		}
1498
1499		set_page_private_gcing(page);
1500
1501		err = f2fs_do_write_data_page(&fio);
1502		if (err) {
1503			clear_page_private_gcing(page);
1504			if (err == -ENOMEM) {
1505				memalloc_retry_wait(GFP_NOFS);
1506				goto retry;
1507			}
1508			if (is_dirty)
1509				set_page_dirty(page);
1510		}
1511	}
1512out:
1513	f2fs_put_page(page, 1);
1514	return err;
1515}
1516
1517/*
1518 * This function tries to get the parent node of a victim data block, and
1519 * checks the block's validity. If the block is valid, it is copied with
1520 * cold status and the parent node is updated.
1521 * If the parent node is not valid or the data block address is different,
1522 * the victim data block is ignored.
1523 */
1524static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1525		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1526		bool force_migrate)
1527{
1528	struct super_block *sb = sbi->sb;
1529	struct f2fs_summary *entry;
1530	block_t start_addr;
1531	int off;
1532	int phase = 0;
1533	int submitted = 0;
1534	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1535
1536	start_addr = START_BLOCK(sbi, segno);
1537
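	/*
	 * Five passes over the segment's summary entries: phase 0 reads
	 * ahead the NAT blocks, phase 1 the dnode pages, phase 2 the inode
	 * pages, phase 3 grabs the inodes and reads ahead their data pages,
	 * and phase 4 performs the actual data block moves.
	 */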
1538next_step:
1539	entry = sum;
1540
1541	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1542		struct page *data_page;
1543		struct inode *inode;
1544		struct node_info dni; /* dnode info for the data */
1545		unsigned int ofs_in_node, nofs;
1546		block_t start_bidx;
1547		nid_t nid = le32_to_cpu(entry->nid);
1548
1549		/*
1550		 * stop BG_GC if there are not enough free sections,
1551		 * or stop GC if the segment has become fully valid due to
1552		 * a race with SSR block allocation.
1553		 */
1554		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1555			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
1556							CAP_BLKS_PER_SEC(sbi)))
1557			return submitted;
1558
1559		if (check_valid_map(sbi, segno, off) == 0)
1560			continue;
1561
1562		if (phase == 0) {
1563			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1564							META_NAT, true);
1565			continue;
1566		}
1567
1568		if (phase == 1) {
1569			f2fs_ra_node_page(sbi, nid);
1570			continue;
1571		}
1572
1573		/* Get an inode by ino, checking its validity */
1574		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1575			continue;
1576
1577		if (phase == 2) {
1578			f2fs_ra_node_page(sbi, dni.ino);
1579			continue;
1580		}
1581
1582		ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1583
1584		if (phase == 3) {
1585			int err;
1586
1587			inode = f2fs_iget(sb, dni.ino);
1588			if (IS_ERR(inode))
1589				continue;
1590
1591			if (is_bad_inode(inode) ||
1592					special_file(inode->i_mode)) {
1593				iput(inode);
1594				continue;
1595			}
1596
1597			if (f2fs_has_inline_data(inode)) {
1598				iput(inode);
1599				set_sbi_flag(sbi, SBI_NEED_FSCK);
1600				f2fs_err_ratelimited(sbi,
1601					"inode %lx has both inline_data flag and "
1602					"data block, nid=%u, ofs_in_node=%u",
1603					inode->i_ino, dni.nid, ofs_in_node);
1604				continue;
1605			}
1606
1607			err = f2fs_gc_pinned_control(inode, gc_type, segno);
1608			if (err == -EAGAIN) {
1609				iput(inode);
1610				return submitted;
1611			}
1612
1613			if (!f2fs_down_write_trylock(
1614				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1615				iput(inode);
1616				sbi->skipped_gc_rwsem++;
1617				continue;
1618			}
1619
1620			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1621								ofs_in_node;
1622
1623			if (f2fs_meta_inode_gc_required(inode)) {
1624				int err = ra_data_block(inode, start_bidx);
1625
1626				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1627				if (err) {
1628					iput(inode);
1629					continue;
1630				}
1631				add_gc_inode(gc_list, inode);
1632				continue;
1633			}
1634
1635			data_page = f2fs_get_read_data_page(inode, start_bidx,
1636							REQ_RAHEAD, true, NULL);
1637			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1638			if (IS_ERR(data_page)) {
1639				iput(inode);
1640				continue;
1641			}
1642
1643			f2fs_put_page(data_page, 0);
1644			add_gc_inode(gc_list, inode);
1645			continue;
1646		}
1647
1648		/* phase 4 */
1649		inode = find_gc_inode(gc_list, dni.ino);
1650		if (inode) {
1651			struct f2fs_inode_info *fi = F2FS_I(inode);
1652			bool locked = false;
1653			int err;
1654
1655			if (S_ISREG(inode->i_mode)) {
1656				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
1657					sbi->skipped_gc_rwsem++;
1658					continue;
1659				}
1660				if (!f2fs_down_write_trylock(
1661						&fi->i_gc_rwsem[READ])) {
1662					sbi->skipped_gc_rwsem++;
1663					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1664					continue;
1665				}
1666				locked = true;
1667
1668				/* wait for all inflight aio data */
1669				inode_dio_wait(inode);
1670			}
1671
1672			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1673								+ ofs_in_node;
1674			if (f2fs_meta_inode_gc_required(inode))
1675				err = move_data_block(inode, start_bidx,
1676							gc_type, segno, off);
1677			else
1678				err = move_data_page(inode, start_bidx, gc_type,
1679								segno, off);
1680
1681			if (!err && (gc_type == FG_GC ||
1682					f2fs_meta_inode_gc_required(inode)))
1683				submitted++;
1684
1685			if (locked) {
1686				f2fs_up_write(&fi->i_gc_rwsem[READ]);
1687				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1688			}
1689
1690			stat_inc_data_blk_count(sbi, 1, gc_type);
1691		}
1692	}
1693
1694	if (++phase < 5)
1695		goto next_step;
1696
1697	return submitted;
1698}
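
/*
 * gc_data_segment() makes five passes (phase 0..4) over the same summary
 * block: readahead the NAT blocks, readahead the dnode pages, readahead
 * the owning inodes' node pages, pin the inodes and readahead their data
 * pages, and only then migrate the blocks. Batching the readahead this
 * way keeps the final pass from stalling on synchronous metadata reads.
 * The control flow is essentially (simplified sketch; handle_entry() is
 * a stand-in for the per-phase work above):
 *
 *	for (phase = 0; phase < 5; phase++)
 *		for (off = 0; off < usable_blks_in_seg; off++)
 *			handle_entry(phase, &sum[off]);
 */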
1699
1700static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1701			int gc_type, bool one_time)
1702{
1703	struct sit_info *sit_i = SIT_I(sbi);
1704	int ret;
1705
1706	down_write(&sit_i->sentry_lock);
1707	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
1708			LFS, 0, one_time);
1709	up_write(&sit_i->sentry_lock);
1710	return ret;
1711}
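
/*
 * Victim selection has to see stable per-segment validity counters, so
 * f2fs_get_victim() runs with sentry_lock held for write. On success
 * (return value 0) the caller can feed the victim section straight into
 * do_garbage_collect(), e.g. (sketch):
 *
 *	unsigned int victim;
 *
 *	if (!__get_victim(sbi, &victim, FG_GC, false))
 *		seg_freed = do_garbage_collect(sbi, victim, &gc_list,
 *						FG_GC, false, false);
 */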
1712
1713static int do_garbage_collect(struct f2fs_sb_info *sbi,
1714				unsigned int start_segno,
1715				struct gc_inode_list *gc_list, int gc_type,
1716				bool force_migrate, bool one_time)
1717{
1718	struct page *sum_page;
1719	struct f2fs_summary_block *sum;
1720	struct blk_plug plug;
1721	unsigned int segno = start_segno;
1722	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
1723	unsigned int sec_end_segno;
1724	int seg_freed = 0, migrated = 0;
1725	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1726						SUM_TYPE_DATA : SUM_TYPE_NODE;
1727	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
1728	int submitted = 0;
1729
1730	if (__is_large_section(sbi)) {
1731		sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
1732
1733		/*
1734		 * zone-capacity can be less than zone-size on zoned devices,
1735		 * resulting in fewer usable segments in the zone than
1736		 * expected; calculate the end segno in the zone which can
1737		 * be garbage collected.
1738		 */
1739		if (f2fs_sb_has_blkzoned(sbi))
1740			sec_end_segno -= SEGS_PER_SEC(sbi) -
1741					f2fs_usable_segs_in_sec(sbi);
1742
1743		if (gc_type == BG_GC || one_time) {
1744			unsigned int window_granularity =
1745				sbi->migration_window_granularity;
1746
1747			if (f2fs_sb_has_blkzoned(sbi) &&
1748					!has_enough_free_blocks(sbi,
1749					sbi->gc_thread->boost_zoned_gc_percent))
1750				window_granularity *=
1751					BOOST_GC_MULTIPLE;
1752
1753			end_segno = start_segno + window_granularity;
1754		}
1755
1756		if (end_segno > sec_end_segno)
1757			end_segno = sec_end_segno;
1758	}
1759
1760	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1761
1762	/* read ahead multiple SSA blocks that have contiguous addresses */
1763	if (__is_large_section(sbi))
1764		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1765					end_segno - segno, META_SSA, true);
1766
1767	/* take a reference on all summary pages */
1768	while (segno < end_segno) {
1769		sum_page = f2fs_get_sum_page(sbi, segno++);
1770		if (IS_ERR(sum_page)) {
1771			int err = PTR_ERR(sum_page);
1772
1773			end_segno = segno - 1;
1774			for (segno = start_segno; segno < end_segno; segno++) {
1775				sum_page = find_get_page(META_MAPPING(sbi),
1776						GET_SUM_BLOCK(sbi, segno));
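				/*
				 * Two puts: one for the reference taken by
				 * find_get_page() just above, one for the
				 * reference still held from f2fs_get_sum_page()
				 * in the loop that got us here.
				 */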
1777				f2fs_put_page(sum_page, 0);
1778				f2fs_put_page(sum_page, 0);
1779			}
1780			return err;
1781		}
1782		unlock_page(sum_page);
1783	}
1784
1785	blk_start_plug(&plug);
1786
1787	for (segno = start_segno; segno < end_segno; segno++) {
1788
1789		/* find segment summary of victim */
1790		sum_page = find_get_page(META_MAPPING(sbi),
1791					GET_SUM_BLOCK(sbi, segno));
1792		f2fs_put_page(sum_page, 0);
1793
1794		if (get_valid_blocks(sbi, segno, false) == 0)
1795			goto freed;
1796		if (gc_type == BG_GC && __is_large_section(sbi) &&
1797				migrated >= sbi->migration_granularity)
1798			goto skip;
1799		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1800			goto skip;
1801
1802		sum = page_address(sum_page);
1803		if (type != GET_SUM_TYPE((&sum->footer))) {
1804			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1805				 segno, type, GET_SUM_TYPE((&sum->footer)));
1806			f2fs_stop_checkpoint(sbi, false,
1807				STOP_CP_REASON_CORRUPTED_SUMMARY);
1808			goto skip;
1809		}
1810
1811		/*
1812		 * This ordering avoids the following deadlock:
1813		 * - lock_page(sum_page)         - f2fs_replace_block
1814		 *  - check_valid_map()            - down_write(sentry_lock)
1815		 *   - down_read(sentry_lock)     - change_curseg()
1816		 *                                  - lock_page(sum_page)
1817		 */
1818		if (type == SUM_TYPE_NODE)
1819			submitted += gc_node_segment(sbi, sum->entries, segno,
1820								gc_type);
1821		else
1822			submitted += gc_data_segment(sbi, sum->entries, gc_list,
1823							segno, gc_type,
1824							force_migrate);
1825
1826		stat_inc_gc_seg_count(sbi, data_type, gc_type);
1827		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1828		migrated++;
1829
1830freed:
1831		if (gc_type == FG_GC &&
1832				get_valid_blocks(sbi, segno, false) == 0)
1833			seg_freed++;
1834
1835		if (__is_large_section(sbi))
1836			sbi->next_victim_seg[gc_type] =
1837				(segno + 1 < sec_end_segno) ?
1838					segno + 1 : NULL_SEGNO;
1839skip:
1840		f2fs_put_page(sum_page, 0);
1841	}
1842
1843	if (submitted)
1844		f2fs_submit_merged_write(sbi, data_type);
1845
1846	blk_finish_plug(&plug);
1847
1848	if (migrated)
1849		stat_inc_gc_sec_count(sbi, data_type, gc_type);
1850
1851	return seg_freed;
1852}
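
/*
 * Note that do_garbage_collect() brackets the whole migration loop with
 * blk_start_plug()/blk_finish_plug(): the many small I/Os issued while
 * moving blocks are queued on the per-task plug and submitted in larger,
 * merged batches when the plug is released. The general shape:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for_each_victim_block(...)
 *		submit_io(...);
 *	blk_finish_plug(&plug);
 *
 * (for_each_victim_block()/submit_io() are placeholders for the
 * gc_node_segment()/gc_data_segment() calls above.)
 */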
1853
1854int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1855{
1856	int gc_type = gc_control->init_gc_type;
1857	unsigned int segno = gc_control->victim_segno;
1858	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
1859	int ret = 0;
1860	struct cp_control cpc;
1861	struct gc_inode_list gc_list = {
1862		.ilist = LIST_HEAD_INIT(gc_list.ilist),
1863		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1864	};
1865	unsigned int skipped_round = 0, round = 0;
1866	unsigned int upper_secs;
1867
1868	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1869				gc_control->nr_free_secs,
1870				get_pages(sbi, F2FS_DIRTY_NODES),
1871				get_pages(sbi, F2FS_DIRTY_DENTS),
1872				get_pages(sbi, F2FS_DIRTY_IMETA),
1873				free_sections(sbi),
1874				free_segments(sbi),
1875				reserved_segments(sbi),
1876				prefree_segments(sbi));
1877
1878	cpc.reason = __get_cp_reason(sbi);
1879gc_more:
1880	sbi->skipped_gc_rwsem = 0;
1881	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1882		ret = -EINVAL;
1883		goto stop;
1884	}
1885	if (unlikely(f2fs_cp_error(sbi))) {
1886		ret = -EIO;
1887		goto stop;
1888	}
1889
1890	/* Let's run FG_GC, if we don't have enough space. */
1891	if (has_not_enough_free_secs(sbi, 0, 0)) {
1892		gc_type = FG_GC;
1893
1894		/*
1895		 * For example, if there are many prefree_segments below the
1896		 * given threshold, we can free them by writing a checkpoint.
1897		 * Then we secure free segments which don't need FG_GC any more.
1898		 */
1899		if (prefree_segments(sbi)) {
1900			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1901			ret = f2fs_write_checkpoint(sbi, &cpc);
1902			if (ret)
1903				goto stop;
1904			/* Reset due to checkpoint */
1905			sec_freed = 0;
1906		}
1907	}
1908
1909	/* f2fs_balance_fs() doesn't need to do BG_GC in the critical path. */
1910	if (gc_type == BG_GC && gc_control->no_bg_gc) {
1911		ret = -EINVAL;
1912		goto stop;
1913	}
1914retry:
1915	ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
1916	if (ret) {
1917		/* allow searching for a victim in sections that have pinned data */
1918		if (ret == -ENODATA && gc_type == FG_GC &&
1919				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1920			f2fs_unpin_all_sections(sbi, false);
1921			goto retry;
1922		}
1923		goto stop;
1924	}
1925
1926	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1927				gc_control->should_migrate_blocks,
1928				gc_control->one_time);
1929	if (seg_freed < 0)
1930		goto stop;
1931
1932	total_freed += seg_freed;
1933
1934	if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
1935		sec_freed++;
1936		total_sec_freed++;
1937	}
1938
1939	if (gc_control->one_time)
1940		goto stop;
1941
1942	if (gc_type == FG_GC) {
1943		sbi->cur_victim_sec = NULL_SEGNO;
1944
1945		if (has_enough_free_secs(sbi, sec_freed, 0)) {
1946			if (!gc_control->no_bg_gc &&
1947			    total_sec_freed < gc_control->nr_free_secs)
1948				goto go_gc_more;
1949			goto stop;
1950		}
1951		if (sbi->skipped_gc_rwsem)
1952			skipped_round++;
1953		round++;
1954		if (skipped_round > MAX_SKIP_GC_COUNT &&
1955				skipped_round * 2 >= round) {
1956			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1957			ret = f2fs_write_checkpoint(sbi, &cpc);
1958			goto stop;
1959		}
1960	} else if (has_enough_free_secs(sbi, 0, 0)) {
1961		goto stop;
1962	}
1963
1964	__get_secs_required(sbi, NULL, &upper_secs, NULL);
1965
1966	/*
1967	 * Write a checkpoint to reclaim prefree segments.
1968	 * We need three more extra sections for the writer's data/node/dentry.
1969	 */
1970	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1971				prefree_segments(sbi)) {
1972		stat_inc_cp_call_count(sbi, TOTAL_CALL);
1973		ret = f2fs_write_checkpoint(sbi, &cpc);
1974		if (ret)
1975			goto stop;
1976		/* Reset due to checkpoint */
1977		sec_freed = 0;
1978	}
1979go_gc_more:
1980	segno = NULL_SEGNO;
1981	goto gc_more;
1982
1983stop:
1984	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1985	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1986
1987	if (gc_type == FG_GC)
1988		f2fs_unpin_all_sections(sbi, true);
1989
1990	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1991				get_pages(sbi, F2FS_DIRTY_NODES),
1992				get_pages(sbi, F2FS_DIRTY_DENTS),
1993				get_pages(sbi, F2FS_DIRTY_IMETA),
1994				free_sections(sbi),
1995				free_segments(sbi),
1996				reserved_segments(sbi),
1997				prefree_segments(sbi));
1998
1999	f2fs_up_write(&sbi->gc_lock);
2000
2001	put_gc_inode(&gc_list);
2002
2003	if (gc_control->err_gc_skipped && !ret)
2004		ret = total_sec_freed ? 0 : -EAGAIN;
2005	return ret;
2006}
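
/*
 * f2fs_gc() is a retry loop: pick a victim, collect it, then jump back
 * to gc_more until enough sections are free. BG_GC escalates to FG_GC
 * once has_not_enough_free_secs() fires, and a checkpoint is written
 * either to turn prefree segments into free ones or when too many rounds
 * were skipped on i_gc_rwsem contention (skipped_round * 2 >= round),
 * since at that point migrating more blocks cannot free sections without
 * a checkpoint.
 */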
2007
2008int __init f2fs_create_garbage_collection_cache(void)
2009{
2010	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
2011					sizeof(struct victim_entry));
2012	return victim_entry_slab ? 0 : -ENOMEM;
2013}
2014
2015void f2fs_destroy_garbage_collection_cache(void)
2016{
2017	kmem_cache_destroy(victim_entry_slab);
2018}
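
/*
 * victim_entry_slab follows the usual module-lifetime slab pattern:
 * created once at init, used for ATGC victim candidates, destroyed on
 * exit. f2fs_kmem_cache_create() is a thin wrapper around
 * kmem_cache_create(); the raw pattern looks like:
 *
 *	struct kmem_cache *cache;
 *	void *obj;
 *
 *	cache = kmem_cache_create("name", size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
 *	obj = kmem_cache_alloc(cache, GFP_NOFS);
 *	kmem_cache_free(cache, obj);
 *	kmem_cache_destroy(cache);
 */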
2019
2020static void init_atgc_management(struct f2fs_sb_info *sbi)
2021{
2022	struct atgc_management *am = &sbi->am;
2023
2024	if (test_opt(sbi, ATGC) &&
2025		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
2026		am->atgc_enabled = true;
2027
2028	am->root = RB_ROOT_CACHED;
2029	INIT_LIST_HEAD(&am->victim_list);
2030	am->victim_count = 0;
2031
2032	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
2033	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
2034	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
2035	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
2036}
2037
2038void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
2039{
2040	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
2041
2042	/* serve the warm/cold data area from the slower device */
2043	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
2044		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
2045				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
2046
2047	init_atgc_management(sbi);
2048}
2049
2050int f2fs_gc_range(struct f2fs_sb_info *sbi,
2051		unsigned int start_seg, unsigned int end_seg,
2052		bool dry_run, unsigned int dry_run_sections)
2053{
2054	unsigned int segno;
2055	unsigned int gc_secs = dry_run_sections;
2056
2057	if (unlikely(f2fs_cp_error(sbi)))
2058		return -EIO;
2059
2060	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
2061		struct gc_inode_list gc_list = {
2062			.ilist = LIST_HEAD_INIT(gc_list.ilist),
2063			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
2064		};
2065
2066		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
2067		put_gc_inode(&gc_list);
2068
2069		if (!dry_run && get_valid_blocks(sbi, segno, true))
2070			return -EAGAIN;
2071		if (dry_run && dry_run_sections &&
2072		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2073			break;
2074
2075		if (fatal_signal_pending(current))
2076			return -ERESTARTSYS;
2077	}
2078
2079	return 0;
2080}
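
/*
 * f2fs_gc_range() force-migrates the valid blocks of every section in
 * [start_seg, end_seg]. @dry_run does not skip the migration itself; it
 * only relaxes the post-checks: a non-dry run fails with -EAGAIN as soon
 * as a section still holds valid blocks, while a dry run with
 * @dry_run_sections set stops early once that many sections have been
 * emptied successfully.
 */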
2081
2082static int free_segment_range(struct f2fs_sb_info *sbi,
2083				unsigned int secs, bool dry_run)
2084{
2085	unsigned int next_inuse, start, end;
2086	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2087	int gc_mode, gc_type;
2088	int err = 0;
2089	int type;
2090
2091	/* Force block allocation for GC */
2092	MAIN_SECS(sbi) -= secs;
2093	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
2094	end = MAIN_SEGS(sbi) - 1;
2095
2096	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2097	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
2098		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2099			SIT_I(sbi)->last_victim[gc_mode] = 0;
2100
2101	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2102		if (sbi->next_victim_seg[gc_type] >= start)
2103			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2104	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2105
2106	/* Move out cursegs from the target range */
2107	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
2108		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2109		if (err)
2110			goto out;
2111	}
2112
2113	/* do GC to move out valid blocks in the range */
2114	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2115	if (err || dry_run)
2116		goto out;
2117
2118	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2119	err = f2fs_write_checkpoint(sbi, &cpc);
2120	if (err)
2121		goto out;
2122
2123	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2124	if (next_inuse <= end) {
2125		f2fs_err(sbi, "segno %u should be free but still in use!",
2126			 next_inuse);
2127		f2fs_bug_on(sbi, 1);
2128	}
2129out:
2130	MAIN_SECS(sbi) += secs;
2131	return err;
2132}
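
/*
 * The trick in free_segment_range() is the temporary shrink of
 * MAIN_SECS(sbi): with the tail sections excluded from the main area,
 * segment allocation and GC can only place blocks below @start, so
 * moving the cursegs out and garbage collecting [start, end] fully
 * drains the range. MAIN_SECS is restored on exit; the real geometry
 * change is committed later via update_sb_metadata()/update_fs_metadata().
 */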
2133
2134static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2135{
2136	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2137	int section_count;
2138	int segment_count;
2139	int segment_count_main;
2140	long long block_count;
2141	int segs = secs * SEGS_PER_SEC(sbi);
2142
2143	f2fs_down_write(&sbi->sb_lock);
2144
2145	section_count = le32_to_cpu(raw_sb->section_count);
2146	segment_count = le32_to_cpu(raw_sb->segment_count);
2147	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2148	block_count = le64_to_cpu(raw_sb->block_count);
2149
2150	raw_sb->section_count = cpu_to_le32(section_count + secs);
2151	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2152	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2153	raw_sb->block_count = cpu_to_le64(block_count +
2154			(long long)SEGS_TO_BLKS(sbi, segs));
2155	if (f2fs_is_multi_device(sbi)) {
2156		int last_dev = sbi->s_ndevs - 1;
2157		int dev_segs =
2158			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2159
2160		raw_sb->devs[last_dev].total_segments =
2161						cpu_to_le32(dev_segs + segs);
2162	}
2163
2164	f2fs_up_write(&sbi->sb_lock);
2165}
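
/*
 * All f2fs_super_block fields are little-endian on disk, hence the
 * le32_to_cpu()/cpu_to_le32() round-trips above. Any on-disk counter
 * update follows the same decode/modify/encode shape, e.g. for a
 * hypothetical __le32 field:
 *
 *	u32 cnt = le32_to_cpu(raw->field);
 *	raw->field = cpu_to_le32(cnt + delta);
 */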
2166
2167static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2168{
2169	int segs = secs * SEGS_PER_SEC(sbi);
2170	long long blks = SEGS_TO_BLKS(sbi, segs);
2171	long long user_block_count =
2172				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2173
2174	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2175	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2176	MAIN_SECS(sbi) += secs;
2177	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2178	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2179	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2180
2181	if (f2fs_is_multi_device(sbi)) {
2182		int last_dev = sbi->s_ndevs - 1;
2183
2184		FDEV(last_dev).total_segments =
2185				(int)FDEV(last_dev).total_segments + segs;
2186		FDEV(last_dev).end_blk =
2187				(long long)FDEV(last_dev).end_blk + blks;
2188#ifdef CONFIG_BLK_DEV_ZONED
2189		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2190					div_u64(blks, sbi->blocks_per_blkz);
2191#endif
2192	}
2193}
2194
2195int f2fs_resize_fs(struct file *filp, __u64 block_count)
2196{
2197	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2198	__u64 old_block_count, shrunk_blocks;
2199	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2200	unsigned int secs;
2201	int err = 0;
2202	__u32 rem;
2203
2204	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2205	if (block_count > old_block_count)
2206		return -EINVAL;
2207
2208	if (f2fs_is_multi_device(sbi)) {
2209		int last_dev = sbi->s_ndevs - 1;
2210		__u64 last_segs = FDEV(last_dev).total_segments;
2211
2212		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
2213								old_block_count)
2214			return -EINVAL;
2215	}
2216
2217	/* the new fs size should be aligned to the section size */
2218	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2219	if (rem)
2220		return -EINVAL;
2221
2222	if (block_count == old_block_count)
2223		return 0;
2224
2225	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2226		f2fs_err(sbi, "Should run fsck to repair first.");
2227		return -EFSCORRUPTED;
2228	}
2229
2230	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2231		f2fs_err(sbi, "Checkpoint should be enabled.");
2232		return -EINVAL;
2233	}
2234
2235	err = mnt_want_write_file(filp);
2236	if (err)
2237		return err;
2238
2239	shrunk_blocks = old_block_count - block_count;
2240	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2241
2242	/* stop other GC */
2243	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2244		err = -EAGAIN;
2245		goto out_drop_write;
2246	}
2247
2248	/* stop CP to protect MAIN_SEC in free_segment_range */
2249	f2fs_lock_op(sbi);
2250
2251	spin_lock(&sbi->stat_lock);
2252	if (shrunk_blocks + valid_user_blocks(sbi) +
2253		sbi->current_reserved_blocks + sbi->unusable_block_count +
2254		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2255		err = -ENOSPC;
2256	spin_unlock(&sbi->stat_lock);
2257
2258	if (err)
2259		goto out_unlock;
2260
2261	err = free_segment_range(sbi, secs, true);
2262
2263out_unlock:
2264	f2fs_unlock_op(sbi);
2265	f2fs_up_write(&sbi->gc_lock);
2266out_drop_write:
2267	mnt_drop_write_file(filp);
2268	if (err)
2269		return err;
2270
2271	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2272	if (err)
2273		return err;
2274
2275	if (f2fs_readonly(sbi->sb)) {
2276		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2277		if (err)
2278			return err;
2279		return -EROFS;
2280	}
2281
2282	f2fs_down_write(&sbi->gc_lock);
2283	f2fs_down_write(&sbi->cp_global_sem);
2284
2285	spin_lock(&sbi->stat_lock);
2286	if (shrunk_blocks + valid_user_blocks(sbi) +
2287		sbi->current_reserved_blocks + sbi->unusable_block_count +
2288		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2289		err = -ENOSPC;
2290	else
2291		sbi->user_block_count -= shrunk_blocks;
2292	spin_unlock(&sbi->stat_lock);
2293	if (err)
2294		goto out_err;
2295
2296	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2297	err = free_segment_range(sbi, secs, false);
2298	if (err)
2299		goto recover_out;
2300
2301	update_sb_metadata(sbi, -secs);
2302
2303	err = f2fs_commit_super(sbi, false);
2304	if (err) {
2305		update_sb_metadata(sbi, secs);
2306		goto recover_out;
2307	}
2308
2309	update_fs_metadata(sbi, -secs);
2310	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2311	set_sbi_flag(sbi, SBI_IS_DIRTY);
2312
2313	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2314	err = f2fs_write_checkpoint(sbi, &cpc);
2315	if (err) {
2316		update_fs_metadata(sbi, secs);
2317		update_sb_metadata(sbi, secs);
2318		f2fs_commit_super(sbi, false);
2319	}
2320recover_out:
2321	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2322	if (err) {
2323		set_sbi_flag(sbi, SBI_NEED_FSCK);
2324		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2325
2326		spin_lock(&sbi->stat_lock);
2327		sbi->user_block_count += shrunk_blocks;
2328		spin_unlock(&sbi->stat_lock);
2329	}
2330out_err:
2331	f2fs_up_write(&sbi->cp_global_sem);
2332	f2fs_up_write(&sbi->gc_lock);
2333	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2334	return err;
2335}
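
/*
 * Shrinking via f2fs_resize_fs() is thus a two-pass protocol: first a
 * trial free_segment_range() under gc_lock/f2fs_lock_op() with writes
 * still allowed, then the real pass under a frozen superblock, where the
 * on-disk super block is committed before the in-core metadata is
 * updated and a checkpoint seals the result. Each failure point unwinds
 * in reverse (restore sb metadata, restore user_block_count, set
 * SBI_NEED_FSCK), so an interrupted resize degrades to "run fsck" rather
 * than to silent corruption.
 */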