   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/gc.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/fs.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/f2fs_fs.h>
  12#include <linux/kthread.h>
  13#include <linux/delay.h>
  14#include <linux/freezer.h>
  15#include <linux/sched/signal.h>
  16#include <linux/random.h>
  17#include <linux/sched/mm.h>
  18
  19#include "f2fs.h"
  20#include "node.h"
  21#include "segment.h"
  22#include "gc.h"
  23#include "iostat.h"
  24#include <trace/events/f2fs.h>
  25
  26static struct kmem_cache *victim_entry_slab;
  27
  28static unsigned int count_bits(const unsigned long *addr,
  29				unsigned int offset, unsigned int len);
  30
  31static int gc_thread_func(void *data)
  32{
  33	struct f2fs_sb_info *sbi = data;
  34	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
  35	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
  36	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
  37	unsigned int wait_ms;
  38	struct f2fs_gc_control gc_control = {
  39		.victim_segno = NULL_SEGNO,
  40		.should_migrate_blocks = false,
  41		.err_gc_skipped = false };
  42
  43	wait_ms = gc_th->min_sleep_time;
  44
  45	set_freezable();
  46	do {
  47		bool sync_mode, foreground = false;
  48
  49		wait_event_freezable_timeout(*wq,
  50				kthread_should_stop() ||
  51				waitqueue_active(fggc_wq) ||
  52				gc_th->gc_wake,
  53				msecs_to_jiffies(wait_ms));
  54
  55		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
  56			foreground = true;
  57
  58		/* give it one try */
  59		if (gc_th->gc_wake)
  60			gc_th->gc_wake = false;
  61
  62		if (f2fs_readonly(sbi->sb)) {
  63			stat_other_skip_bggc_count(sbi);
  64			continue;
  65		}
  66		if (kthread_should_stop())
  67			break;
  68
  69		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
  70			increase_sleep_time(gc_th, &wait_ms);
  71			stat_other_skip_bggc_count(sbi);
  72			continue;
  73		}
  74
  75		if (time_to_inject(sbi, FAULT_CHECKPOINT))
  76			f2fs_stop_checkpoint(sbi, false,
  77					STOP_CP_REASON_FAULT_INJECT);
  78
  79		if (!sb_start_write_trylock(sbi->sb)) {
  80			stat_other_skip_bggc_count(sbi);
  81			continue;
  82		}
  83
  84		/*
  85		 * [GC triggering conditions]
  86		 * 0. GC is not being conducted currently.
  87		 * 1. There are enough dirty segments.
  88		 * 2. The IO subsystem is idle, judged by the # of writeback pages.
  89		 * 3. The IO subsystem is idle, judged by the # of requests in
  90		 *    the bdev's request list.
  91		 *
  92		 * Note) We have to avoid triggering GCs too frequently, because
  93		 * some segments may be invalidated soon afterwards by user
  94		 * updates or deletions, so it is better to wait a while and let
  95		 * dirty segments accumulate. (A backoff sketch follows the function.)
  96		 */
  97		if (sbi->gc_mode == GC_URGENT_HIGH ||
  98				sbi->gc_mode == GC_URGENT_MID) {
  99			wait_ms = gc_th->urgent_sleep_time;
 100			f2fs_down_write(&sbi->gc_lock);
 101			goto do_gc;
 102		}
 103
 104		if (foreground) {
 105			f2fs_down_write(&sbi->gc_lock);
 106			goto do_gc;
 107		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 108			stat_other_skip_bggc_count(sbi);
 109			goto next;
 110		}
 111
 112		if (!is_idle(sbi, GC_TIME)) {
 113			increase_sleep_time(gc_th, &wait_ms);
 114			f2fs_up_write(&sbi->gc_lock);
 115			stat_io_skip_bggc_count(sbi);
 116			goto next;
 117		}
 118
 119		if (has_enough_invalid_blocks(sbi))
 120			decrease_sleep_time(gc_th, &wait_ms);
 121		else
 122			increase_sleep_time(gc_th, &wait_ms);
 123do_gc:
 124		stat_inc_gc_call_count(sbi, foreground ?
 125					FOREGROUND : BACKGROUND);
 126
 127		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
 128
 129		/* foreground GC was triggered via f2fs_balance_fs() */
 130		if (foreground)
 131			sync_mode = false;
 132
 133		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
 134		gc_control.no_bg_gc = foreground;
 135		gc_control.nr_free_secs = foreground ? 1 : 0;
 136
 137		/* if return value is not zero, no victim was selected */
 138		if (f2fs_gc(sbi, &gc_control)) {
 139			/* don't adjust wait_ms for foreground gc */
 140			if (!foreground)
 141				wait_ms = gc_th->no_gc_sleep_time;
 142		} else {
 143			/* reset wait_ms to default sleep time */
 144			if (wait_ms == gc_th->no_gc_sleep_time)
 145				wait_ms = gc_th->min_sleep_time;
 146		}
 147
 148		if (foreground)
 149			wake_up_all(&gc_th->fggc_wq);
 150
 151		trace_f2fs_background_gc(sbi->sb, wait_ms,
 152				prefree_segments(sbi), free_segments(sbi));
 153
 154		/* balancing f2fs's metadata periodically */
 155		f2fs_balance_fs_bg(sbi, true);
 156next:
 157		if (sbi->gc_mode != GC_NORMAL) {
 158			spin_lock(&sbi->gc_remaining_trials_lock);
 159			if (sbi->gc_remaining_trials) {
 160				sbi->gc_remaining_trials--;
 161				if (!sbi->gc_remaining_trials)
 162					sbi->gc_mode = GC_NORMAL;
 163			}
 164			spin_unlock(&sbi->gc_remaining_trials_lock);
 165		}
 166		sb_end_write(sbi->sb);
 167
 168	} while (!kthread_should_stop());
 169	return 0;
 170}
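/*
 * Editor's note: a minimal, self-contained model of the adaptive sleep
 * policy used by gc_thread_func() above. The real increase_sleep_time()
 * and decrease_sleep_time() helpers live in gc.h; the fixed step below
 * is an illustrative assumption, not the kernel's actual tuning.
 */
static unsigned int demo_adjust_sleep(unsigned int wait_ms, bool io_busy,
				      unsigned int min_ms, unsigned int max_ms,
				      unsigned int step_ms)
{
	if (io_busy) {
		/* I/O is busy or little garbage has accumulated: back off */
		wait_ms = (wait_ms + step_ms > max_ms) ? max_ms :
							 wait_ms + step_ms;
	} else {
		/* enough invalid blocks exist: poll again sooner */
		wait_ms = (wait_ms > min_ms + step_ms) ? wait_ms - step_ms :
							 min_ms;
	}
	return wait_ms;
}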
 171
 172int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
 173{
 174	struct f2fs_gc_kthread *gc_th;
 175	dev_t dev = sbi->sb->s_bdev->bd_dev;
 176
 177	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
 178	if (!gc_th)
 179		return -ENOMEM;
 180
 181	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
 182	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
 183	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
 184	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
 185
 186	gc_th->gc_wake = false;
 187
 188	sbi->gc_thread = gc_th;
 189	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
 190	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
 191	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
 192			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 193	if (IS_ERR(gc_th->f2fs_gc_task)) {
 194		int err = PTR_ERR(gc_th->f2fs_gc_task);
 195
 196		kfree(gc_th);
 197		sbi->gc_thread = NULL;
 198		return err;
 199	}
 200
 201	return 0;
 202}
 203
 204void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 205{
 206	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 207
 208	if (!gc_th)
 209		return;
 210	kthread_stop(gc_th->f2fs_gc_task);
 211	wake_up_all(&gc_th->fggc_wq);
 212	kfree(gc_th);
 213	sbi->gc_thread = NULL;
 214}
 215
 216static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
 217{
 218	int gc_mode;
 219
 220	if (gc_type == BG_GC) {
 221		if (sbi->am.atgc_enabled)
 222			gc_mode = GC_AT;
 223		else
 224			gc_mode = GC_CB;
 225	} else {
 226		gc_mode = GC_GREEDY;
 227	}
 228
 229	switch (sbi->gc_mode) {
 230	case GC_IDLE_CB:
 231		gc_mode = GC_CB;
 232		break;
 233	case GC_IDLE_GREEDY:
 234	case GC_URGENT_HIGH:
 235		gc_mode = GC_GREEDY;
 236		break;
 237	case GC_IDLE_AT:
 238		gc_mode = GC_AT;
 239		break;
 240	}
 241
 242	return gc_mode;
 243}
 244
 245static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 246			int type, struct victim_sel_policy *p)
 247{
 248	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 249
 250	if (p->alloc_mode == SSR) {
 251		p->gc_mode = GC_GREEDY;
 252		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 253		p->max_search = dirty_i->nr_dirty[type];
 254		p->ofs_unit = 1;
 255	} else if (p->alloc_mode == AT_SSR) {
 256		p->gc_mode = GC_GREEDY;
 257		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 258		p->max_search = dirty_i->nr_dirty[type];
 259		p->ofs_unit = 1;
 260	} else {
 261		p->gc_mode = select_gc_type(sbi, gc_type);
 262		p->ofs_unit = SEGS_PER_SEC(sbi);
 263		if (__is_large_section(sbi)) {
 264			p->dirty_bitmap = dirty_i->dirty_secmap;
 265			p->max_search = count_bits(p->dirty_bitmap,
 266						0, MAIN_SECS(sbi));
 267		} else {
 268			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
 269			p->max_search = dirty_i->nr_dirty[DIRTY];
 270		}
 271	}
 272
 273	/*
 274	 * adjust the candidate range; all dirty segments should be
 275	 * selected for the foreground GC and urgent GC cases.
 276	 */
 277	if (gc_type != FG_GC &&
 278			(sbi->gc_mode != GC_URGENT_HIGH) &&
 279			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
 280			p->max_search > sbi->max_victim_search)
 281		p->max_search = sbi->max_victim_search;
 282
 283	/* let's search from the beginning of the hot/small space first. */
 284	if (f2fs_need_rand_seg(sbi))
 285		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
 286						SEGS_PER_SEC(sbi));
 287	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
 288		p->offset = 0;
 289	else
 290		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
 291}
 292
 293static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
 294				struct victim_sel_policy *p)
 295{
 296	/* SSR allocates in a segment unit */
 297	if (p->alloc_mode == SSR)
 298		return BLKS_PER_SEG(sbi);
 299	else if (p->alloc_mode == AT_SSR)
 300		return UINT_MAX;
 301
 302	/* LFS */
 303	if (p->gc_mode == GC_GREEDY)
 304		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
 305	else if (p->gc_mode == GC_CB)
 306		return UINT_MAX;
 307	else if (p->gc_mode == GC_AT)
 308		return UINT_MAX;
 309	else /* No other gc_mode */
 310		return 0;
 311}
 312
 313static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
 314{
 315	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 316	unsigned int secno;
 317
 318	/*
 319	 * If the gc_type is FG_GC, we can reuse victim segments that were
 320	 * selected by background GC earlier.
 321	 * Those segments are guaranteed to have few valid blocks.
 322	 */
 323	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
 324		if (sec_usage_check(sbi, secno))
 325			continue;
 326		clear_bit(secno, dirty_i->victim_secmap);
 327		return GET_SEG_FROM_SEC(sbi, secno);
 328	}
 329	return NULL_SEGNO;
 330}
 331
 332static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 333{
 334	struct sit_info *sit_i = SIT_I(sbi);
 335	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 336	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
 337	unsigned long long mtime = 0;
 338	unsigned int vblocks;
 339	unsigned char age = 0;
 340	unsigned char u;
 341	unsigned int i;
 342	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
 343
 344	for (i = 0; i < usable_segs_per_sec; i++)
 345		mtime += get_seg_entry(sbi, start + i)->mtime;
 346	vblocks = get_valid_blocks(sbi, segno, true);
 347
 348	mtime = div_u64(mtime, usable_segs_per_sec);
 349	vblocks = div_u64(vblocks, usable_segs_per_sec);
 350
 351	u = BLKS_TO_SEGS(sbi, vblocks * 100);
 352
 353	/* Handle the case where the system time has been changed by the user */
 354	if (mtime < sit_i->min_mtime)
 355		sit_i->min_mtime = mtime;
 356	if (mtime > sit_i->max_mtime)
 357		sit_i->max_mtime = mtime;
 358	if (sit_i->max_mtime != sit_i->min_mtime)
 359		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
 360				sit_i->max_mtime - sit_i->min_mtime);
 361
 362	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 363}
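/*
 * Editor's note: a worked, standalone sketch of the cost-benefit score
 * computed by get_cb_cost() above, where u is the section utilization in
 * percent and age is normalized to 0..100. Since callers minimize cost,
 * an old, mostly-empty section wins: u=20/age=80 yields a benefit of
 * 100*80*80/120 = 5333, while u=80/age=80 yields only 100*20*80/180 = 888,
 * so the former's cost (UINT_MAX - benefit) is smaller and it is selected.
 */
static unsigned int demo_cb_cost(unsigned int u, unsigned int age)
{
	/* benefit grows with free space (100 - u) and with age */
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}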
 364
 365static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
 366			unsigned int segno, struct victim_sel_policy *p)
 367{
 368	if (p->alloc_mode == SSR)
 369		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 370
 371	/* alloc_mode == LFS */
 372	if (p->gc_mode == GC_GREEDY)
 373		return get_valid_blocks(sbi, segno, true);
 374	else if (p->gc_mode == GC_CB)
 375		return get_cb_cost(sbi, segno);
 376
 377	f2fs_bug_on(sbi, 1);
 378	return 0;
 379}
 380
 381static unsigned int count_bits(const unsigned long *addr,
 382				unsigned int offset, unsigned int len)
 383{
 384	unsigned int end = offset + len, sum = 0;
 385
 386	while (offset < end) {
 387		if (test_bit(offset++, addr))
 388			++sum;
 389	}
 390	return sum;
 391}
 392
 393static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
 394				struct rb_root_cached *root)
 395{
 396#ifdef CONFIG_F2FS_CHECK_FS
 397	struct rb_node *cur = rb_first_cached(root), *next;
 398	struct victim_entry *cur_ve, *next_ve;
 399
 400	while (cur) {
 401		next = rb_next(cur);
 402		if (!next)
 403			return true;
 404
 405		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
 406		next_ve = rb_entry(next, struct victim_entry, rb_node);
 407
 408		if (cur_ve->mtime > next_ve->mtime) {
 409			f2fs_info(sbi, "broken victim_rbtree, "
 410				"cur_mtime(%llu) next_mtime(%llu)",
 411				cur_ve->mtime, next_ve->mtime);
 412			return false;
 413		}
 414		cur = next;
 415	}
 416#endif
 417	return true;
 418}
 419
 420static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
 421					unsigned long long mtime)
 422{
 423	struct atgc_management *am = &sbi->am;
 424	struct rb_node *node = am->root.rb_root.rb_node;
 425	struct victim_entry *ve = NULL;
 426
 427	while (node) {
 428		ve = rb_entry(node, struct victim_entry, rb_node);
 429
 430		if (mtime < ve->mtime)
 431			node = node->rb_left;
 432		else
 433			node = node->rb_right;
 434	}
 435	return ve;
 436}
 437
 438static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
 439		unsigned long long mtime, unsigned int segno)
 440{
 441	struct atgc_management *am = &sbi->am;
 442	struct victim_entry *ve;
 443
 444	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
 445
 446	ve->mtime = mtime;
 447	ve->segno = segno;
 448
 449	list_add_tail(&ve->list, &am->victim_list);
 450	am->victim_count++;
 451
 452	return ve;
 453}
 454
 455static void __insert_victim_entry(struct f2fs_sb_info *sbi,
 456				unsigned long long mtime, unsigned int segno)
 457{
 458	struct atgc_management *am = &sbi->am;
 459	struct rb_root_cached *root = &am->root;
 460	struct rb_node **p = &root->rb_root.rb_node;
 461	struct rb_node *parent = NULL;
 462	struct victim_entry *ve;
 463	bool left_most = true;
 464
 465	/* look up rb tree to find parent node */
 466	while (*p) {
 467		parent = *p;
 468		ve = rb_entry(parent, struct victim_entry, rb_node);
 469
 470		if (mtime < ve->mtime) {
 471			p = &(*p)->rb_left;
 472		} else {
 473			p = &(*p)->rb_right;
 474			left_most = false;
 475		}
 476	}
 477
 478	ve = __create_victim_entry(sbi, mtime, segno);
 479
 480	rb_link_node(&ve->rb_node, parent, p);
 481	rb_insert_color_cached(&ve->rb_node, root, left_most);
 482}
 483
 484static void add_victim_entry(struct f2fs_sb_info *sbi,
 485				struct victim_sel_policy *p, unsigned int segno)
 486{
 487	struct sit_info *sit_i = SIT_I(sbi);
 488	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 489	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
 490	unsigned long long mtime = 0;
 491	unsigned int i;
 492
 493	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 494		if (p->gc_mode == GC_AT &&
 495			get_valid_blocks(sbi, segno, true) == 0)
 496			return;
 497	}
 498
 499	for (i = 0; i < SEGS_PER_SEC(sbi); i++)
 500		mtime += get_seg_entry(sbi, start + i)->mtime;
 501	mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
 502
 503	/* Handle the case where the system time has been changed by the user */
 504	if (mtime < sit_i->min_mtime)
 505		sit_i->min_mtime = mtime;
 506	if (mtime > sit_i->max_mtime)
 507		sit_i->max_mtime = mtime;
 508	if (mtime < sit_i->dirty_min_mtime)
 509		sit_i->dirty_min_mtime = mtime;
 510	if (mtime > sit_i->dirty_max_mtime)
 511		sit_i->dirty_max_mtime = mtime;
 512
 513	/* don't choose a young section as a candidate */
 514	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
 515		return;
 516
 517	__insert_victim_entry(sbi, mtime, segno);
 518}
 519
 520static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
 521						struct victim_sel_policy *p)
 522{
 523	struct sit_info *sit_i = SIT_I(sbi);
 524	struct atgc_management *am = &sbi->am;
 525	struct rb_root_cached *root = &am->root;
 526	struct rb_node *node;
 527	struct victim_entry *ve;
 528	unsigned long long total_time;
 529	unsigned long long age, u, accu;
 530	unsigned long long max_mtime = sit_i->dirty_max_mtime;
 531	unsigned long long min_mtime = sit_i->dirty_min_mtime;
 532	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
 533	unsigned int vblocks;
 534	unsigned int dirty_threshold = max(am->max_candidate_count,
 535					am->candidate_ratio *
 536					am->victim_count / 100);
 537	unsigned int age_weight = am->age_weight;
 538	unsigned int cost;
 539	unsigned int iter = 0;
 540
 541	if (max_mtime < min_mtime)
 542		return;
 543
 544	max_mtime += 1;
 545	total_time = max_mtime - min_mtime;
 546
 547	accu = div64_u64(ULLONG_MAX, total_time);
 548	accu = min_t(unsigned long long, div_u64(accu, 100),
 549					DEFAULT_ACCURACY_CLASS);
 550
 551	node = rb_first_cached(root);
 552next:
 553	ve = rb_entry_safe(node, struct victim_entry, rb_node);
 554	if (!ve)
 555		return;
 556
 557	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
 558		goto skip;
 559
 560	/* age = 10000 * x% * 60 */
 561	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
 562								age_weight;
 563
 564	vblocks = get_valid_blocks(sbi, ve->segno, true);
 565	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
 566
 567	/* u = 10000 * x% * 40 */
 568	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
 569							(100 - age_weight);
 570
 571	f2fs_bug_on(sbi, age + u >= UINT_MAX);
 572
 573	cost = UINT_MAX - (age + u);
 574	iter++;
 575
 576	if (cost < p->min_cost ||
 577			(cost == p->min_cost && age > p->oldest_age)) {
 578		p->min_cost = cost;
 579		p->oldest_age = age;
 580		p->min_segno = ve->segno;
 581	}
 582skip:
 583	if (iter < dirty_threshold) {
 584		node = rb_next(node);
 585		goto next;
 586	}
 587}
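/*
 * Editor's note: a standalone model of the weighted score computed in
 * atgc_lookup_victim() above. 'accu' rescales mtime deltas to a fixed
 * accuracy so the age term and the utilization term can be blended by
 * age_weight (60:40 by default, per the "10000 * x% * 60/40" comments).
 * free_blocks stands for sec_blocks - vblocks; this sketch omits the
 * div64 helpers the kernel uses for 64-bit division on 32-bit hosts.
 */
static unsigned long long demo_atgc_cost(unsigned long long accu,
		unsigned long long total_time, unsigned long long age_delta,
		unsigned long long free_blocks, unsigned long long sec_blocks,
		unsigned int age_weight)
{
	unsigned long long age = accu * age_delta / total_time * age_weight;
	unsigned long long u = accu * free_blocks / sec_blocks *
							(100 - age_weight);

	/* lower cost == older and emptier == better victim */
	return (unsigned long long)UINT_MAX - (age + u);
}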
 588
 589/*
 590 * select candidates around source section in range of
 591 * [target - dirty_threshold, target + dirty_threshold]
 592 */
 593static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
 594						struct victim_sel_policy *p)
 595{
 596	struct sit_info *sit_i = SIT_I(sbi);
 597	struct atgc_management *am = &sbi->am;
 598	struct victim_entry *ve;
 599	unsigned long long age;
 600	unsigned long long max_mtime = sit_i->dirty_max_mtime;
 601	unsigned long long min_mtime = sit_i->dirty_min_mtime;
 602	unsigned int vblocks;
 603	unsigned int dirty_threshold = max(am->max_candidate_count,
 604					am->candidate_ratio *
 605					am->victim_count / 100);
 606	unsigned int cost, iter;
 607	int stage = 0;
 608
 609	if (max_mtime < min_mtime)
 610		return;
 611	max_mtime += 1;
 612next_stage:
 613	iter = 0;
 614	ve = __lookup_victim_entry(sbi, p->age);
 615next_node:
 616	if (!ve) {
 617		if (stage++ == 0)
 618			goto next_stage;
 619		return;
 620	}
 621
 622	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
 623		goto skip_node;
 624
 625	age = max_mtime - ve->mtime;
 626
 627	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
 628	f2fs_bug_on(sbi, !vblocks);
 629
 630	/* rare case */
 631	if (vblocks == BLKS_PER_SEG(sbi))
 632		goto skip_node;
 633
 634	iter++;
 635
 636	age = max_mtime - abs(p->age - age);
 637	cost = UINT_MAX - vblocks;
 638
 639	if (cost < p->min_cost ||
 640			(cost == p->min_cost && age > p->oldest_age)) {
 641		p->min_cost = cost;
 642		p->oldest_age = age;
 643		p->min_segno = ve->segno;
 644	}
 645skip_node:
 646	if (iter < dirty_threshold) {
 647		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
 648					rb_next(&ve->rb_node),
 649					struct victim_entry, rb_node);
 650		goto next_node;
 651	}
 652
 653	if (stage++ == 0)
 654		goto next_stage;
 655}
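/*
 * Editor's note: the two-stage walk of atssr_lookup_victim() above,
 * reduced to its pattern. Stage 0 steps toward older entries and stage
 * 1 toward younger ones, so up to dirty_threshold candidates are taken
 * from each side of the target age. A hedged skeleton only; scoring and
 * the mtime range checks are elided.
 */
#if 0
	for (stage = 0; stage < 2; stage++) {
		struct victim_entry *ve = __lookup_victim_entry(sbi, p->age);
		unsigned int iter;

		for (iter = 0; ve && iter < dirty_threshold; iter++) {
			/* ...score ve here, then step away from the target... */
			struct rb_node *node = stage == 0 ?
				rb_prev(&ve->rb_node) : rb_next(&ve->rb_node);

			ve = rb_entry_safe(node, struct victim_entry, rb_node);
		}
	}
#endif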
 656
 657static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
 658						struct victim_sel_policy *p)
 659{
 660	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
 661
 662	if (p->gc_mode == GC_AT)
 663		atgc_lookup_victim(sbi, p);
 664	else if (p->alloc_mode == AT_SSR)
 665		atssr_lookup_victim(sbi, p);
 666	else
 667		f2fs_bug_on(sbi, 1);
 668}
 669
 670static void release_victim_entry(struct f2fs_sb_info *sbi)
 671{
 672	struct atgc_management *am = &sbi->am;
 673	struct victim_entry *ve, *tmp;
 674
 675	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
 676		list_del(&ve->list);
 677		kmem_cache_free(victim_entry_slab, ve);
 678		am->victim_count--;
 679	}
 680
 681	am->root = RB_ROOT_CACHED;
 682
 683	f2fs_bug_on(sbi, am->victim_count);
 684	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
 685}
 686
 687static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
 688{
 689	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 690	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 691
 692	if (!dirty_i->enable_pin_section)
 693		return false;
 694	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
 695		dirty_i->pinned_secmap_cnt++;
 696	return true;
 697}
 698
 699static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
 700{
 701	return dirty_i->pinned_secmap_cnt;
 702}
 703
 704static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
 705						unsigned int secno)
 706{
 707	return dirty_i->enable_pin_section &&
 708		f2fs_pinned_section_exists(dirty_i) &&
 709		test_bit(secno, dirty_i->pinned_secmap);
 710}
 711
 712static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
 713{
 714	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
 715
 716	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
 717		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
 718		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
 719	}
 720	DIRTY_I(sbi)->enable_pin_section = enable;
 721}
 722
 723static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
 724							unsigned int segno)
 725{
 726	if (!f2fs_is_pinned_file(inode))
 727		return 0;
 728	if (gc_type != FG_GC)
 729		return -EBUSY;
 730	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
 731		f2fs_pin_file_control(inode, true);
 732	return -EAGAIN;
 733}
 734
 735/*
 736 * This function is called from two paths.
 737 * One is garbage collection and the other is SSR segment selection.
 738 * When it is called during GC, it just gets a victim segment
 739 * and it does not remove it from dirty seglist.
 740 * When it is called from SSR segment selection, it finds a segment
 741 * that has the minimum number of valid blocks and removes it from the dirty seglist.
 742 */
 743int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
 744			int gc_type, int type, char alloc_mode,
 745			unsigned long long age)
 746{
 747	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 748	struct sit_info *sm = SIT_I(sbi);
 749	struct victim_sel_policy p;
 750	unsigned int secno, last_victim;
 751	unsigned int last_segment;
 752	unsigned int nsearched;
 753	bool is_atgc;
 754	int ret = 0;
 755
 756	mutex_lock(&dirty_i->seglist_lock);
 757	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
 758
 759	p.alloc_mode = alloc_mode;
 760	p.age = age;
 761	p.age_threshold = sbi->am.age_threshold;
 762
 763retry:
 764	select_policy(sbi, gc_type, type, &p);
 765	p.min_segno = NULL_SEGNO;
 766	p.oldest_age = 0;
 767	p.min_cost = get_max_cost(sbi, &p);
 768
 769	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
 770	nsearched = 0;
 771
 772	if (is_atgc)
 773		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
 774
 775	if (*result != NULL_SEGNO) {
 776		if (!get_valid_blocks(sbi, *result, false)) {
 777			ret = -ENODATA;
 778			goto out;
 779		}
 780
 781		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 782			ret = -EBUSY;
 783		else
 784			p.min_segno = *result;
 785		goto out;
 786	}
 787
 788	ret = -ENODATA;
 789	if (p.max_search == 0)
 790		goto out;
 791
 792	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
 793		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
 794			p.min_segno = sbi->next_victim_seg[BG_GC];
 795			*result = p.min_segno;
 796			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
 797			goto got_result;
 798		}
 799		if (gc_type == FG_GC &&
 800				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
 801			p.min_segno = sbi->next_victim_seg[FG_GC];
 802			*result = p.min_segno;
 803			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 804			goto got_result;
 805		}
 806	}
 807
 808	last_victim = sm->last_victim[p.gc_mode];
 809	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 810		p.min_segno = check_bg_victims(sbi);
 811		if (p.min_segno != NULL_SEGNO)
 812			goto got_it;
 813	}
 814
 815	while (1) {
 816		unsigned long cost, *dirty_bitmap;
 817		unsigned int unit_no, segno;
 818
 819		dirty_bitmap = p.dirty_bitmap;
 820		unit_no = find_next_bit(dirty_bitmap,
 821				last_segment / p.ofs_unit,
 822				p.offset / p.ofs_unit);
 823		segno = unit_no * p.ofs_unit;
 824		if (segno >= last_segment) {
 825			if (sm->last_victim[p.gc_mode]) {
 826				last_segment =
 827					sm->last_victim[p.gc_mode];
 828				sm->last_victim[p.gc_mode] = 0;
 829				p.offset = 0;
 830				continue;
 831			}
 832			break;
 833		}
 834
 835		p.offset = segno + p.ofs_unit;
 836		nsearched++;
 837
 838#ifdef CONFIG_F2FS_CHECK_FS
 839		/*
 840		 * skip selecting an invalid segno (one that failed the block
 841		 * validity check during GC) to avoid an endless GC loop in
 842		 * such cases.
 843		 */
 844		if (test_bit(segno, sm->invalid_segmap))
 845			goto next;
 846#endif
 847
 848		secno = GET_SEC_FROM_SEG(sbi, segno);
 849
 850		if (sec_usage_check(sbi, secno))
 851			goto next;
 852
 853		/* Don't touch checkpointed data */
 854		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
 855			if (p.alloc_mode == LFS) {
 856				/*
 857				 * LFS is set to find source section during GC.
 858				 * The victim should have no checkpointed data.
 859				 */
 860				if (get_ckpt_valid_blocks(sbi, segno, true))
 861					goto next;
 862			} else {
 863				/*
 864				 * SSR | AT_SSR are set to find a target segment
 865				 * for writes, which may be filled with checkpointed
 866				 * and newly written blocks.
 867				 */
 868				if (!f2fs_segment_has_free_slot(sbi, segno))
 869					goto next;
 870			}
 871		}
 872
 873		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
 874			goto next;
 875
 876		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
 877			goto next;
 878
 879		if (is_atgc) {
 880			add_victim_entry(sbi, &p, segno);
 881			goto next;
 882		}
 883
 884		cost = get_gc_cost(sbi, segno, &p);
 885
 886		if (p.min_cost > cost) {
 887			p.min_segno = segno;
 888			p.min_cost = cost;
 889		}
 890next:
 891		if (nsearched >= p.max_search) {
 892			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
 893				sm->last_victim[p.gc_mode] =
 894					last_victim + p.ofs_unit;
 895			else
 896				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
 897			sm->last_victim[p.gc_mode] %=
 898				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
 899			break;
 900		}
 901	}
 902
 903	/* get victim for GC_AT/AT_SSR */
 904	if (is_atgc) {
 905		lookup_victim_by_age(sbi, &p);
 906		release_victim_entry(sbi);
 907	}
 908
 909	if (is_atgc && p.min_segno == NULL_SEGNO &&
 910			sm->elapsed_time < p.age_threshold) {
 911		p.age_threshold = 0;
 912		goto retry;
 913	}
 914
 915	if (p.min_segno != NULL_SEGNO) {
 916got_it:
 917		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 918got_result:
 919		if (p.alloc_mode == LFS) {
 920			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 921			if (gc_type == FG_GC)
 922				sbi->cur_victim_sec = secno;
 923			else
 924				set_bit(secno, dirty_i->victim_secmap);
 925		}
 926		ret = 0;
 927
 928	}
 929out:
 930	if (p.min_segno != NULL_SEGNO)
 931		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 932				sbi->cur_victim_sec,
 933				prefree_segments(sbi), free_segments(sbi));
 934	mutex_unlock(&dirty_i->seglist_lock);
 935
 936	return ret;
 937}
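/*
 * Editor's note: a minimal sketch of the circular bitmap scan inside
 * f2fs_get_victim() above. The search starts at 'start' (p.offset); if
 * it runs off the end it wraps and rescans [0, wrap_limit), mirroring
 * how last_victim lets a sweep resume where the previous one stopped.
 * find_next_bit() is the kernel bitmap primitive; this wrapper itself
 * is hypothetical.
 */
static unsigned int demo_scan_dirty(const unsigned long *bitmap,
		unsigned int nr_units, unsigned int start,
		unsigned int wrap_limit)
{
	unsigned int unit = find_next_bit(bitmap, nr_units, start);

	if (unit < nr_units)
		return unit;			/* found in [start, end) */

	/* wrap around once, like resetting last_victim to 0 above */
	unit = find_next_bit(bitmap, wrap_limit, 0);
	return unit < wrap_limit ? unit : nr_units;	/* nr_units: none */
}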
 938
 939static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 940{
 941	struct inode_entry *ie;
 942
 943	ie = radix_tree_lookup(&gc_list->iroot, ino);
 944	if (ie)
 945		return ie->inode;
 946	return NULL;
 947}
 948
 949static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 950{
 951	struct inode_entry *new_ie;
 952
 953	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
 954		iput(inode);
 955		return;
 956	}
 957	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
 958					GFP_NOFS, true, NULL);
 959	new_ie->inode = inode;
 960
 961	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
 962	list_add_tail(&new_ie->list, &gc_list->ilist);
 963}
 964
 965static void put_gc_inode(struct gc_inode_list *gc_list)
 966{
 967	struct inode_entry *ie, *next_ie;
 968
 969	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
 970		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
 971		iput(ie->inode);
 972		list_del(&ie->list);
 973		kmem_cache_free(f2fs_inode_entry_slab, ie);
 974	}
 975}
 976
 977static int check_valid_map(struct f2fs_sb_info *sbi,
 978				unsigned int segno, int offset)
 979{
 980	struct sit_info *sit_i = SIT_I(sbi);
 981	struct seg_entry *sentry;
 982	int ret;
 983
 984	down_read(&sit_i->sentry_lock);
 985	sentry = get_seg_entry(sbi, segno);
 986	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
 987	up_read(&sit_i->sentry_lock);
 988	return ret;
 989}
 990
 991/*
 992 * This function compares the node address recorded in the summary with
 993 * the one in the NAT. If the node is valid, it is migrated with cold
 994 * status; otherwise (an invalid node) it is ignored.
 995 */
 996static int gc_node_segment(struct f2fs_sb_info *sbi,
 997		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 998{
 999	struct f2fs_summary *entry;
1000	block_t start_addr;
1001	int off;
1002	int phase = 0;
1003	bool fggc = (gc_type == FG_GC);
1004	int submitted = 0;
1005	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1006
1007	start_addr = START_BLOCK(sbi, segno);
1008
1009next_step:
1010	entry = sum;
1011
1012	if (fggc && phase == 2)
1013		atomic_inc(&sbi->wb_sync_req[NODE]);
1014
1015	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1016		nid_t nid = le32_to_cpu(entry->nid);
1017		struct page *node_page;
1018		struct node_info ni;
1019		int err;
1020
1021		/* stop BG_GC if there are not enough free sections. */
1022		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
1023			return submitted;
1024
1025		if (check_valid_map(sbi, segno, off) == 0)
1026			continue;
1027
1028		if (phase == 0) {
1029			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1030							META_NAT, true);
1031			continue;
1032		}
1033
1034		if (phase == 1) {
1035			f2fs_ra_node_page(sbi, nid);
1036			continue;
1037		}
1038
1039		/* phase == 2 */
1040		node_page = f2fs_get_node_page(sbi, nid);
1041		if (IS_ERR(node_page))
1042			continue;
1043
1044		/* block may become invalid during f2fs_get_node_page */
1045		if (check_valid_map(sbi, segno, off) == 0) {
1046			f2fs_put_page(node_page, 1);
1047			continue;
1048		}
1049
1050		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
1051			f2fs_put_page(node_page, 1);
1052			continue;
1053		}
1054
1055		if (ni.blk_addr != start_addr + off) {
1056			f2fs_put_page(node_page, 1);
1057			continue;
1058		}
1059
1060		err = f2fs_move_node_page(node_page, gc_type);
1061		if (!err && gc_type == FG_GC)
1062			submitted++;
1063		stat_inc_node_blk_count(sbi, 1, gc_type);
1064	}
1065
1066	if (++phase < 3)
1067		goto next_step;
1068
1069	if (fggc)
1070		atomic_dec(&sbi->wb_sync_req[NODE]);
1071	return submitted;
1072}
1073
1074/*
1075 * Calculate the start block index corresponding to the given node offset.
1076 * Be careful: the caller must pass a node offset that refers only to direct
1077 * node blocks. Passing an offset that points to any other type of node
1078 * block, such as an indirect or double indirect node block, is a caller's
1079 * bug. (A worked example follows the function.)
1080 */
1081block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
1082{
1083	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
1084	unsigned int bidx;
1085
1086	if (node_ofs == 0)
1087		return 0;
1088
1089	if (node_ofs <= 2) {
1090		bidx = node_ofs - 1;
1091	} else if (node_ofs <= indirect_blks) {
1092		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
1093
1094		bidx = node_ofs - 2 - dec;
1095	} else {
1096		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
1097
1098		bidx = node_ofs - 5 - dec;
1099	}
1100	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
1101}
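/*
 * Editor's note: a worked example of the mapping above, assuming the
 * common 4KB-block geometry (ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK =
 * NIDS_PER_BLOCK = 1018). Node offset 1 is the first direct node, so
 * bidx = 0 and its data starts at block index 923; offset 2 gives
 * 1 * 1018 + 923 = 1941; offset 4, the first direct node under an
 * indirect node, gives dec = 0 and bidx = 2, hence 2959.
 */
static unsigned int demo_start_bidx(unsigned int node_ofs)
{
	const unsigned int nids = 1018, addrs_blk = 1018, addrs_ino = 923;
	const unsigned int indirect_blks = 2 * nids + 4;
	unsigned int bidx;

	if (node_ofs == 0)			/* the inode block itself */
		return 0;
	if (node_ofs <= 2)			/* the two direct nodes */
		bidx = node_ofs - 1;
	else if (node_ofs <= indirect_blks)	/* under an indirect node */
		bidx = node_ofs - 2 - (node_ofs - 4) / (nids + 1);
	else					/* under double indirect */
		bidx = node_ofs - 5 -
			(node_ofs - indirect_blks - 3) / (nids + 1);
	return bidx * addrs_blk + addrs_ino;	/* e.g. ofs 2 -> 1941 */
}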
1102
1103static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1104		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
1105{
1106	struct page *node_page;
1107	nid_t nid;
1108	unsigned int ofs_in_node, max_addrs, base;
1109	block_t source_blkaddr;
1110
1111	nid = le32_to_cpu(sum->nid);
1112	ofs_in_node = le16_to_cpu(sum->ofs_in_node);
1113
1114	node_page = f2fs_get_node_page(sbi, nid);
1115	if (IS_ERR(node_page))
1116		return false;
1117
1118	if (f2fs_get_node_info(sbi, nid, dni, false)) {
1119		f2fs_put_page(node_page, 1);
1120		return false;
1121	}
1122
1123	if (sum->version != dni->version) {
1124		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1125			  __func__);
1126		set_sbi_flag(sbi, SBI_NEED_FSCK);
1127	}
1128
1129	if (f2fs_check_nid_range(sbi, dni->ino)) {
1130		f2fs_put_page(node_page, 1);
1131		return false;
1132	}
1133
1134	if (IS_INODE(node_page)) {
1135		base = offset_in_addr(F2FS_INODE(node_page));
1136		max_addrs = DEF_ADDRS_PER_INODE;
1137	} else {
1138		base = 0;
1139		max_addrs = DEF_ADDRS_PER_BLOCK;
1140	}
1141
1142	if (base + ofs_in_node >= max_addrs) {
1143		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1144			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
1145		f2fs_put_page(node_page, 1);
1146		return false;
1147	}
1148
1149	*nofs = ofs_of_node(node_page);
1150	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
1151	f2fs_put_page(node_page, 1);
1152
1153	if (source_blkaddr != blkaddr) {
1154#ifdef CONFIG_F2FS_CHECK_FS
1155		unsigned int segno = GET_SEGNO(sbi, blkaddr);
1156		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1157
1158		if (unlikely(check_valid_map(sbi, segno, offset))) {
1159			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1160				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1161					 blkaddr, source_blkaddr, segno);
1162				set_sbi_flag(sbi, SBI_NEED_FSCK);
1163			}
1164		}
1165#endif
1166		return false;
1167	}
1168	return true;
1169}
1170
1171static int ra_data_block(struct inode *inode, pgoff_t index)
1172{
1173	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1174	struct address_space *mapping = inode->i_mapping;
1175	struct dnode_of_data dn;
1176	struct page *page;
1177	struct f2fs_io_info fio = {
1178		.sbi = sbi,
1179		.ino = inode->i_ino,
1180		.type = DATA,
1181		.temp = COLD,
1182		.op = REQ_OP_READ,
1183		.op_flags = 0,
1184		.encrypted_page = NULL,
1185		.in_list = 0,
1186	};
1187	int err;
1188
1189	page = f2fs_grab_cache_page(mapping, index, true);
1190	if (!page)
1191		return -ENOMEM;
1192
1193	if (f2fs_lookup_read_extent_cache_block(inode, index,
1194						&dn.data_blkaddr)) {
1195		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1196						DATA_GENERIC_ENHANCE_READ))) {
1197			err = -EFSCORRUPTED;
1198			goto put_page;
1199		}
1200		goto got_it;
1201	}
1202
1203	set_new_dnode(&dn, inode, NULL, NULL, 0);
1204	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1205	if (err)
1206		goto put_page;
1207	f2fs_put_dnode(&dn);
1208
1209	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1210		err = -ENOENT;
1211		goto put_page;
1212	}
1213	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1214						DATA_GENERIC_ENHANCE))) {
1215		err = -EFSCORRUPTED;
1216		goto put_page;
1217	}
1218got_it:
1219	/* read page */
1220	fio.page = page;
1221	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1222
1223	/*
1224	 * don't cache encrypted data in the meta inode until previously dirty
1225	 * data has been written back, to avoid racing between GC and flush.
1226	 */
1227	f2fs_wait_on_page_writeback(page, DATA, true, true);
1228
1229	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1230
1231	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1232					dn.data_blkaddr,
1233					FGP_LOCK | FGP_CREAT, GFP_NOFS);
1234	if (!fio.encrypted_page) {
1235		err = -ENOMEM;
1236		goto put_page;
1237	}
1238
1239	err = f2fs_submit_page_bio(&fio);
1240	if (err)
1241		goto put_encrypted_page;
1242	f2fs_put_page(fio.encrypted_page, 0);
1243	f2fs_put_page(page, 1);
1244
1245	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1246	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1247
1248	return 0;
1249put_encrypted_page:
1250	f2fs_put_page(fio.encrypted_page, 1);
1251put_page:
1252	f2fs_put_page(page, 1);
1253	return err;
1254}
1255
1256/*
1257 * Move a data block via META_MAPPING while keeping the data page locked.
1258 * This can be used to move blocks, aka LBAs, directly on disk.
1259 */
1260static int move_data_block(struct inode *inode, block_t bidx,
1261				int gc_type, unsigned int segno, int off)
1262{
1263	struct f2fs_io_info fio = {
1264		.sbi = F2FS_I_SB(inode),
1265		.ino = inode->i_ino,
1266		.type = DATA,
1267		.temp = COLD,
1268		.op = REQ_OP_READ,
1269		.op_flags = 0,
1270		.encrypted_page = NULL,
1271		.in_list = 0,
1272	};
1273	struct dnode_of_data dn;
1274	struct f2fs_summary sum;
1275	struct node_info ni;
1276	struct page *page, *mpage;
1277	block_t newaddr;
1278	int err = 0;
1279	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1280	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1281				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1282				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
1283
1284	/* do not read out */
1285	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
1286	if (!page)
1287		return -ENOMEM;
1288
1289	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1290		err = -ENOENT;
1291		goto out;
1292	}
1293
1294	err = f2fs_gc_pinned_control(inode, gc_type, segno);
1295	if (err)
1296		goto out;
1297
1298	set_new_dnode(&dn, inode, NULL, NULL, 0);
1299	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
1300	if (err)
1301		goto out;
1302
1303	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1304		ClearPageUptodate(page);
1305		err = -ENOENT;
1306		goto put_out;
1307	}
1308
1309	/*
1310	 * don't cache encrypted data in the meta inode until previously dirty
1311	 * data has been written back, to avoid racing between GC and flush.
1312	 */
1313	f2fs_wait_on_page_writeback(page, DATA, true, true);
1314
1315	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1316
1317	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1318	if (err)
1319		goto put_out;
1320
1321	/* read page */
1322	fio.page = page;
1323	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1324
1325	if (lfs_mode)
1326		f2fs_down_write(&fio.sbi->io_order_lock);
1327
1328	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1329					fio.old_blkaddr, false);
1330	if (!mpage) {
1331		err = -ENOMEM;
1332		goto up_out;
1333	}
1334
1335	fio.encrypted_page = mpage;
1336
1337	/* read source block in mpage */
1338	if (!PageUptodate(mpage)) {
1339		err = f2fs_submit_page_bio(&fio);
1340		if (err) {
1341			f2fs_put_page(mpage, 1);
1342			goto up_out;
1343		}
1344
1345		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1346							F2FS_BLKSIZE);
1347		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1348							F2FS_BLKSIZE);
1349
1350		lock_page(mpage);
1351		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1352						!PageUptodate(mpage))) {
1353			err = -EIO;
1354			f2fs_put_page(mpage, 1);
1355			goto up_out;
1356		}
1357	}
1358
1359	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1360
1361	/* allocate block address */
1362	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1363				&sum, type, NULL);
1364	if (err) {
1365		f2fs_put_page(mpage, 1);
1366		/* filesystem should shut down, no need to recover the block */
1367		goto up_out;
1368	}
1369
1370	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1371				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
1372	if (!fio.encrypted_page) {
1373		err = -ENOMEM;
1374		f2fs_put_page(mpage, 1);
1375		goto recover_block;
1376	}
1377
1378	/* write target block */
1379	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
1380	memcpy(page_address(fio.encrypted_page),
1381				page_address(mpage), PAGE_SIZE);
1382	f2fs_put_page(mpage, 1);
1383
1384	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
1385
1386	set_page_dirty(fio.encrypted_page);
1387	if (clear_page_dirty_for_io(fio.encrypted_page))
1388		dec_page_count(fio.sbi, F2FS_DIRTY_META);
1389
1390	set_page_writeback(fio.encrypted_page);
1391
1392	fio.op = REQ_OP_WRITE;
1393	fio.op_flags = REQ_SYNC;
1394	fio.new_blkaddr = newaddr;
1395	f2fs_submit_page_write(&fio);
1396
1397	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
1398
1399	f2fs_update_data_blkaddr(&dn, newaddr);
1400	set_inode_flag(inode, FI_APPEND_WRITE);
1401
1402	f2fs_put_page(fio.encrypted_page, 1);
1403recover_block:
1404	if (err)
1405		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1406							true, true, true);
1407up_out:
1408	if (lfs_mode)
1409		f2fs_up_write(&fio.sbi->io_order_lock);
1410put_out:
1411	f2fs_put_dnode(&dn);
1412out:
1413	f2fs_put_page(page, 1);
1414	return err;
1415}
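/*
 * Editor's note: the essential flow of move_data_block() above as a
 * hedged skeleton. All helpers named are the ones called above; locking,
 * error paths and the recover_block rollback are elided.
 */
#if 0
	/* 1. pin the data page so user writes cannot race with the move */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	/* 2. read the source LBA into a META_MAPPING page (mpage) */
	/* 3. reserve a new LBA via f2fs_allocate_data_block() */
	/* 4. memcpy() mpage into the page cached at the new LBA and issue
	 *    it with REQ_SYNC through f2fs_submit_page_write() */
	/* 5. f2fs_update_data_blkaddr() repoints the dnode; on failure,
	 *    f2fs_do_replace_block() restores the old address */
#endif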
1416
1417static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
1418							unsigned int segno, int off)
1419{
1420	struct page *page;
1421	int err = 0;
1422
1423	page = f2fs_get_lock_data_page(inode, bidx, true);
1424	if (IS_ERR(page))
1425		return PTR_ERR(page);
1426
1427	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1428		err = -ENOENT;
1429		goto out;
1430	}
1431
1432	err = f2fs_gc_pinned_control(inode, gc_type, segno);
1433	if (err)
1434		goto out;
1435
1436	if (gc_type == BG_GC) {
1437		if (PageWriteback(page)) {
1438			err = -EAGAIN;
1439			goto out;
1440		}
1441		set_page_dirty(page);
1442		set_page_private_gcing(page);
1443	} else {
1444		struct f2fs_io_info fio = {
1445			.sbi = F2FS_I_SB(inode),
1446			.ino = inode->i_ino,
1447			.type = DATA,
1448			.temp = COLD,
1449			.op = REQ_OP_WRITE,
1450			.op_flags = REQ_SYNC,
1451			.old_blkaddr = NULL_ADDR,
1452			.page = page,
1453			.encrypted_page = NULL,
1454			.need_lock = LOCK_REQ,
1455			.io_type = FS_GC_DATA_IO,
1456		};
1457		bool is_dirty = PageDirty(page);
1458
1459retry:
1460		f2fs_wait_on_page_writeback(page, DATA, true, true);
1461
1462		set_page_dirty(page);
1463		if (clear_page_dirty_for_io(page)) {
1464			inode_dec_dirty_pages(inode);
1465			f2fs_remove_dirty_inode(inode);
1466		}
1467
1468		set_page_private_gcing(page);
1469
1470		err = f2fs_do_write_data_page(&fio);
1471		if (err) {
1472			clear_page_private_gcing(page);
1473			if (err == -ENOMEM) {
1474				memalloc_retry_wait(GFP_NOFS);
1475				goto retry;
1476			}
1477			if (is_dirty)
1478				set_page_dirty(page);
1479		}
1480	}
1481out:
1482	f2fs_put_page(page, 1);
1483	return err;
1484}
1485
1486/*
1487 * This function tries to get the parent node of a victim data block, and
1488 * checks the data block's validity. If the block is valid, it is copied
1489 * with cold status and the parent node is modified.
1490 * If the parent node is not valid or the data block address differs,
1491 * the victim data block is ignored.
1492 */
1493static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1494		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1495		bool force_migrate)
1496{
1497	struct super_block *sb = sbi->sb;
1498	struct f2fs_summary *entry;
1499	block_t start_addr;
1500	int off;
1501	int phase = 0;
1502	int submitted = 0;
1503	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1504
1505	start_addr = START_BLOCK(sbi, segno);
1506
1507next_step:
1508	entry = sum;
1509
1510	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1511		struct page *data_page;
1512		struct inode *inode;
1513		struct node_info dni; /* dnode info for the data */
1514		unsigned int ofs_in_node, nofs;
1515		block_t start_bidx;
1516		nid_t nid = le32_to_cpu(entry->nid);
1517
1518		/*
1519		 * stop BG_GC if there are not enough free sections.
1520		 * Also, stop GC if the segment has become fully valid due to
1521		 * a race condition with SSR block allocation.
1522		 */
1523		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1524			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
1525							CAP_BLKS_PER_SEC(sbi)))
1526			return submitted;
1527
1528		if (check_valid_map(sbi, segno, off) == 0)
1529			continue;
1530
1531		if (phase == 0) {
1532			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1533							META_NAT, true);
1534			continue;
1535		}
1536
1537		if (phase == 1) {
1538			f2fs_ra_node_page(sbi, nid);
1539			continue;
1540		}
1541
1542		/* Get the inode by ino, checking its validity */
1543		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1544			continue;
1545
1546		if (phase == 2) {
1547			f2fs_ra_node_page(sbi, dni.ino);
1548			continue;
1549		}
1550
1551		ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1552
1553		if (phase == 3) {
1554			int err;
1555
1556			inode = f2fs_iget(sb, dni.ino);
1557			if (IS_ERR(inode))
1558				continue;
1559
1560			if (is_bad_inode(inode) ||
1561					special_file(inode->i_mode)) {
1562				iput(inode);
1563				continue;
1564			}
1565
1566			err = f2fs_gc_pinned_control(inode, gc_type, segno);
1567			if (err == -EAGAIN) {
1568				iput(inode);
1569				return submitted;
1570			}
1571
1572			if (!f2fs_down_write_trylock(
1573				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1574				iput(inode);
1575				sbi->skipped_gc_rwsem++;
1576				continue;
1577			}
1578
1579			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1580								ofs_in_node;
1581
1582			if (f2fs_post_read_required(inode)) {
1583				int err = ra_data_block(inode, start_bidx);
1584
1585				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1586				if (err) {
1587					iput(inode);
1588					continue;
1589				}
1590				add_gc_inode(gc_list, inode);
1591				continue;
1592			}
1593
1594			data_page = f2fs_get_read_data_page(inode, start_bidx,
1595							REQ_RAHEAD, true, NULL);
1596			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1597			if (IS_ERR(data_page)) {
1598				iput(inode);
1599				continue;
1600			}
1601
1602			f2fs_put_page(data_page, 0);
1603			add_gc_inode(gc_list, inode);
1604			continue;
1605		}
1606
1607		/* phase 4 */
1608		inode = find_gc_inode(gc_list, dni.ino);
1609		if (inode) {
1610			struct f2fs_inode_info *fi = F2FS_I(inode);
1611			bool locked = false;
1612			int err;
1613
1614			if (S_ISREG(inode->i_mode)) {
1615				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
1616					sbi->skipped_gc_rwsem++;
1617					continue;
1618				}
1619				if (!f2fs_down_write_trylock(
1620						&fi->i_gc_rwsem[READ])) {
1621					sbi->skipped_gc_rwsem++;
1622					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1623					continue;
1624				}
1625				locked = true;
1626
1627				/* wait for all inflight aio data */
1628				inode_dio_wait(inode);
1629			}
1630
1631			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1632								+ ofs_in_node;
1633			if (f2fs_post_read_required(inode))
1634				err = move_data_block(inode, start_bidx,
1635							gc_type, segno, off);
1636			else
1637				err = move_data_page(inode, start_bidx, gc_type,
1638								segno, off);
1639
1640			if (!err && (gc_type == FG_GC ||
1641					f2fs_post_read_required(inode)))
1642				submitted++;
1643
1644			if (locked) {
1645				f2fs_up_write(&fi->i_gc_rwsem[READ]);
1646				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1647			}
1648
1649			stat_inc_data_blk_count(sbi, 1, gc_type);
1650		}
1651	}
1652
1653	if (++phase < 5)
1654		goto next_step;
1655
1656	return submitted;
1657}
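/*
 * Editor's note: the phase structure shared by gc_node_segment() and
 * gc_data_segment() above, reduced to its pattern. Each full pass over
 * the segment's summary entries performs one kind of work, so readahead
 * I/O is batched before any block is actually migrated. A hedged
 * skeleton, not the functions' real control flow.
 */
#if 0
	for (phase = 0; phase < 5; phase++) {
		for (off = 0; off < usable_blks_in_seg; off++) {
			switch (phase) {
			case 0:	/* readahead the NAT pages of the nids */
			case 1:	/* readahead the node pages themselves */
			case 2:	/* check liveness, readahead the inodes */
			case 3:	/* iget() inodes, readahead data pages */
			case 4:	/* finally move the live data blocks */
				break;
			}
		}
	}
#endif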
1658
1659static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1660			int gc_type)
1661{
1662	struct sit_info *sit_i = SIT_I(sbi);
1663	int ret;
1664
1665	down_write(&sit_i->sentry_lock);
1666	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
1667	up_write(&sit_i->sentry_lock);
1668	return ret;
1669}
1670
1671static int do_garbage_collect(struct f2fs_sb_info *sbi,
1672				unsigned int start_segno,
1673				struct gc_inode_list *gc_list, int gc_type,
1674				bool force_migrate)
1675{
1676	struct page *sum_page;
1677	struct f2fs_summary_block *sum;
1678	struct blk_plug plug;
1679	unsigned int segno = start_segno;
1680	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
1681	int seg_freed = 0, migrated = 0;
1682	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1683						SUM_TYPE_DATA : SUM_TYPE_NODE;
1684	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
1685	int submitted = 0;
1686
1687	if (__is_large_section(sbi))
1688		end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
1689
1690	/*
1691	 * zone-capacity can be less than zone-size on zoned devices,
1692	 * resulting in fewer usable segments in the zone than expected;
1693	 * calculate the end segno of the zone that can be garbage collected
1694	 */
1695	if (f2fs_sb_has_blkzoned(sbi))
1696		end_segno -= SEGS_PER_SEC(sbi) -
1697					f2fs_usable_segs_in_sec(sbi, segno);
1698
1699	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1700
1701	/* readahead multiple SSA blocks that have contiguous addresses */
1702	if (__is_large_section(sbi))
1703		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1704					end_segno - segno, META_SSA, true);
1705
1706	/* reference all summary pages */
1707	while (segno < end_segno) {
1708		sum_page = f2fs_get_sum_page(sbi, segno++);
1709		if (IS_ERR(sum_page)) {
1710			int err = PTR_ERR(sum_page);
1711
1712			end_segno = segno - 1;
1713			for (segno = start_segno; segno < end_segno; segno++) {
1714				sum_page = find_get_page(META_MAPPING(sbi),
1715						GET_SUM_BLOCK(sbi, segno));
1716				f2fs_put_page(sum_page, 0);
1717				f2fs_put_page(sum_page, 0);
1718			}
1719			return err;
1720		}
1721		unlock_page(sum_page);
1722	}
1723
1724	blk_start_plug(&plug);
1725
1726	for (segno = start_segno; segno < end_segno; segno++) {
1727
1728		/* find segment summary of victim */
1729		sum_page = find_get_page(META_MAPPING(sbi),
1730					GET_SUM_BLOCK(sbi, segno));
1731		f2fs_put_page(sum_page, 0);
1732
1733		if (get_valid_blocks(sbi, segno, false) == 0)
1734			goto freed;
1735		if (gc_type == BG_GC && __is_large_section(sbi) &&
1736				migrated >= sbi->migration_granularity)
1737			goto skip;
1738		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1739			goto skip;
1740
1741		sum = page_address(sum_page);
1742		if (type != GET_SUM_TYPE((&sum->footer))) {
1743			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1744				 segno, type, GET_SUM_TYPE((&sum->footer)));
1745			set_sbi_flag(sbi, SBI_NEED_FSCK);
1746			f2fs_stop_checkpoint(sbi, false,
1747				STOP_CP_REASON_CORRUPTED_SUMMARY);
1748			goto skip;
1749		}
1750
1751		/*
1752		 * this is to avoid deadlock:
1753		 * - lock_page(sum_page)         - f2fs_replace_block
1754		 *  - check_valid_map()            - down_write(sentry_lock)
1755		 *   - down_read(sentry_lock)     - change_curseg()
1756		 *                                  - lock_page(sum_page)
1757		 */
1758		if (type == SUM_TYPE_NODE)
1759			submitted += gc_node_segment(sbi, sum->entries, segno,
1760								gc_type);
1761		else
1762			submitted += gc_data_segment(sbi, sum->entries, gc_list,
1763							segno, gc_type,
1764							force_migrate);
1765
1766		stat_inc_gc_seg_count(sbi, data_type, gc_type);
1767		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1768		migrated++;
1769
1770freed:
1771		if (gc_type == FG_GC &&
1772				get_valid_blocks(sbi, segno, false) == 0)
1773			seg_freed++;
1774
1775		if (__is_large_section(sbi))
1776			sbi->next_victim_seg[gc_type] =
1777				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
1778skip:
1779		f2fs_put_page(sum_page, 0);
1780	}
1781
1782	if (submitted)
1783		f2fs_submit_merged_write(sbi, data_type);
1784
1785	blk_finish_plug(&plug);
1786
1787	if (migrated)
1788		stat_inc_gc_sec_count(sbi, data_type, gc_type);
1789
1790	return seg_freed;
1791}
1792
1793int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1794{
1795	int gc_type = gc_control->init_gc_type;
1796	unsigned int segno = gc_control->victim_segno;
1797	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
1798	int ret = 0;
1799	struct cp_control cpc;
1800	struct gc_inode_list gc_list = {
1801		.ilist = LIST_HEAD_INIT(gc_list.ilist),
1802		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1803	};
1804	unsigned int skipped_round = 0, round = 0;
1805	unsigned int upper_secs;
1806
1807	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1808				gc_control->nr_free_secs,
1809				get_pages(sbi, F2FS_DIRTY_NODES),
1810				get_pages(sbi, F2FS_DIRTY_DENTS),
1811				get_pages(sbi, F2FS_DIRTY_IMETA),
1812				free_sections(sbi),
1813				free_segments(sbi),
1814				reserved_segments(sbi),
1815				prefree_segments(sbi));
1816
1817	cpc.reason = __get_cp_reason(sbi);
1818gc_more:
1819	sbi->skipped_gc_rwsem = 0;
1820	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1821		ret = -EINVAL;
1822		goto stop;
1823	}
1824	if (unlikely(f2fs_cp_error(sbi))) {
1825		ret = -EIO;
1826		goto stop;
1827	}
1828
1829	/* Let's run FG_GC if we don't have enough space. */
1830	if (has_not_enough_free_secs(sbi, 0, 0)) {
1831		gc_type = FG_GC;
1832
1833		/*
1834		 * For example, if there are many prefree_segments below the given
1835		 * threshold, we can free them via a checkpoint. Then we secure
1836		 * free segments which no longer need FG_GC.
1837		 */
1838		if (prefree_segments(sbi)) {
1839			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1840			ret = f2fs_write_checkpoint(sbi, &cpc);
1841			if (ret)
1842				goto stop;
1843			/* Reset due to checkpoint */
1844			sec_freed = 0;
1845		}
1846	}
1847
1848	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
1849	if (gc_type == BG_GC && gc_control->no_bg_gc) {
1850		ret = -EINVAL;
1851		goto stop;
1852	}
1853retry:
1854	ret = __get_victim(sbi, &segno, gc_type);
1855	if (ret) {
1856		/* allow searching for victims in sections that have pinned data */
1857		if (ret == -ENODATA && gc_type == FG_GC &&
1858				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1859			f2fs_unpin_all_sections(sbi, false);
1860			goto retry;
1861		}
1862		goto stop;
1863	}
1864
1865	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1866				gc_control->should_migrate_blocks);
1867	if (seg_freed < 0)
1868		goto stop;
1869
1870	total_freed += seg_freed;
1871
1872	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
1873		sec_freed++;
1874		total_sec_freed++;
1875	}
1876
1877	if (gc_type == FG_GC) {
1878		sbi->cur_victim_sec = NULL_SEGNO;
1879
1880		if (has_enough_free_secs(sbi, sec_freed, 0)) {
1881			if (!gc_control->no_bg_gc &&
1882			    total_sec_freed < gc_control->nr_free_secs)
1883				goto go_gc_more;
1884			goto stop;
1885		}
1886		if (sbi->skipped_gc_rwsem)
1887			skipped_round++;
1888		round++;
1889		if (skipped_round > MAX_SKIP_GC_COUNT &&
1890				skipped_round * 2 >= round) {
1891			stat_inc_cp_call_count(sbi, TOTAL_CALL);
1892			ret = f2fs_write_checkpoint(sbi, &cpc);
1893			goto stop;
1894		}
1895	} else if (has_enough_free_secs(sbi, 0, 0)) {
1896		goto stop;
1897	}
1898
1899	__get_secs_required(sbi, NULL, &upper_secs, NULL);
1900
1901	/*
1902	 * Write checkpoint to reclaim prefree segments.
1903	 * We need three extra sections for the writer's data/node/dentry.
1904	 */
1905	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1906				prefree_segments(sbi)) {
1907		stat_inc_cp_call_count(sbi, TOTAL_CALL);
1908		ret = f2fs_write_checkpoint(sbi, &cpc);
1909		if (ret)
1910			goto stop;
1911		/* Reset due to checkpoint */
1912		sec_freed = 0;
1913	}
1914go_gc_more:
1915	segno = NULL_SEGNO;
1916	goto gc_more;
1917
1918stop:
1919	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1920	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1921
1922	if (gc_type == FG_GC)
1923		f2fs_unpin_all_sections(sbi, true);
1924
1925	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1926				get_pages(sbi, F2FS_DIRTY_NODES),
1927				get_pages(sbi, F2FS_DIRTY_DENTS),
1928				get_pages(sbi, F2FS_DIRTY_IMETA),
1929				free_sections(sbi),
1930				free_segments(sbi),
1931				reserved_segments(sbi),
1932				prefree_segments(sbi));
1933
1934	f2fs_up_write(&sbi->gc_lock);
1935
1936	put_gc_inode(&gc_list);
1937
1938	if (gc_control->err_gc_skipped && !ret)
1939		ret = total_sec_freed ? 0 : -EAGAIN;
1940	return ret;
1941}
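/*
 * Editor's note: a minimal caller sketch, mirroring how gc_thread_func()
 * at the top of this file drives f2fs_gc(). The values shown are the
 * background thread's; a foreground caller would use FG_GC and set
 * nr_free_secs to however many sections it needs. Note that f2fs_gc()
 * releases sbi->gc_lock on its stop path, so the caller only acquires it.
 */
#if 0
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,	/* let GC pick the victim */
		.init_gc_type = BG_GC,
		.no_bg_gc = false,
		.should_migrate_blocks = false,
		.err_gc_skipped = false,
		.nr_free_secs = 0,
	};

	f2fs_down_write(&sbi->gc_lock);
	err = f2fs_gc(sbi, &gc_control);
#endif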
1942
1943int __init f2fs_create_garbage_collection_cache(void)
1944{
1945	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1946					sizeof(struct victim_entry));
1947	return victim_entry_slab ? 0 : -ENOMEM;
1948}
1949
1950void f2fs_destroy_garbage_collection_cache(void)
1951{
1952	kmem_cache_destroy(victim_entry_slab);
1953}
1954
1955static void init_atgc_management(struct f2fs_sb_info *sbi)
1956{
1957	struct atgc_management *am = &sbi->am;
1958
1959	if (test_opt(sbi, ATGC) &&
1960		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1961		am->atgc_enabled = true;
1962
1963	am->root = RB_ROOT_CACHED;
1964	INIT_LIST_HEAD(&am->victim_list);
1965	am->victim_count = 0;
1966
1967	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1968	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1969	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
1970	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
1971}
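/*
 * Note: the four ATGC defaults above are runtime-tunable; recent kernels
 * expose them via sysfs as /sys/fs/f2fs/<dev>/atgc_candidate_ratio,
 * atgc_candidate_count, atgc_age_weight and atgc_age_threshold (see
 * Documentation/ABI/testing/sysfs-fs-f2fs).
 */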
1972
1973void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1974{
1975	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1976
1977	/* serve the warm/cold data area from the slower device */
1978	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1979		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1980				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1981
1982	init_atgc_management(sbi);
1983}
1984
1985int f2fs_gc_range(struct f2fs_sb_info *sbi,
1986		unsigned int start_seg, unsigned int end_seg,
1987		bool dry_run, unsigned int dry_run_sections)
1988{
1989	unsigned int segno;
1990	unsigned int gc_secs = dry_run_sections;
1991
1992	if (unlikely(f2fs_cp_error(sbi)))
1993		return -EIO;
1994
1995	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
1996		struct gc_inode_list gc_list = {
1997			.ilist = LIST_HEAD_INIT(gc_list.ilist),
1998			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1999		};
2000
2001		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
2002						dry_run_sections == 0);
2003		put_gc_inode(&gc_list);
2004
2005		if (!dry_run && get_valid_blocks(sbi, segno, true))
2006			return -EAGAIN;
2007		if (dry_run && dry_run_sections &&
2008		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2009			break;
2010
2011		if (fatal_signal_pending(current))
2012			return -ERESTARTSYS;
2013	}
2014
2015	return 0;
2016}
2017
2018static int free_segment_range(struct f2fs_sb_info *sbi,
2019				unsigned int secs, bool dry_run)
2020{
2021	unsigned int next_inuse, start, end;
2022	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2023	int gc_mode, gc_type;
2024	int err = 0;
2025	int type;
2026
2027	/* Force block allocation for GC */
2028	MAIN_SECS(sbi) -= secs;
2029	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
2030	end = MAIN_SEGS(sbi) - 1;
2031
2032	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2033	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
2034		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2035			SIT_I(sbi)->last_victim[gc_mode] = 0;
2036
2037	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2038		if (sbi->next_victim_seg[gc_type] >= start)
2039			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2040	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2041
2042	/* Move out cursegs from the target range */
2043	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
2044		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2045		if (err)
2046			goto out;
2047	}
2048
2049	/* do GC to move out valid blocks in the range */
2050	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2051	if (err || dry_run)
2052		goto out;
2053
2054	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2055	err = f2fs_write_checkpoint(sbi, &cpc);
2056	if (err)
2057		goto out;
2058
2059	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2060	if (next_inuse <= end) {
2061		f2fs_err(sbi, "segno %u should be free but is still in use!",
2062			 next_inuse);
2063		f2fs_bug_on(sbi, 1);
2064	}
2065out:
2066	MAIN_SECS(sbi) += secs;
2067	return err;
2068}
2069
2070static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2071{
2072	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2073	int section_count;
2074	int segment_count;
2075	int segment_count_main;
2076	long long block_count;
2077	int segs = secs * SEGS_PER_SEC(sbi);
2078
2079	f2fs_down_write(&sbi->sb_lock);
2080
2081	section_count = le32_to_cpu(raw_sb->section_count);
2082	segment_count = le32_to_cpu(raw_sb->segment_count);
2083	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2084	block_count = le64_to_cpu(raw_sb->block_count);
2085
2086	raw_sb->section_count = cpu_to_le32(section_count + secs);
2087	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2088	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2089	raw_sb->block_count = cpu_to_le64(block_count +
2090			(long long)SEGS_TO_BLKS(sbi, segs));
2091	if (f2fs_is_multi_device(sbi)) {
2092		int last_dev = sbi->s_ndevs - 1;
2093		int dev_segs =
2094			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2095
2096		raw_sb->devs[last_dev].total_segments =
2097						cpu_to_le32(dev_segs + segs);
2098	}
2099
2100	f2fs_up_write(&sbi->sb_lock);
2101}
2102
2103static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2104{
2105	int segs = secs * SEGS_PER_SEC(sbi);
2106	long long blks = SEGS_TO_BLKS(sbi, segs);
2107	long long user_block_count =
2108				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2109
2110	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2111	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2112	MAIN_SECS(sbi) += secs;
2113	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2114	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2115	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2116
2117	if (f2fs_is_multi_device(sbi)) {
2118		int last_dev = sbi->s_ndevs - 1;
2119
2120		FDEV(last_dev).total_segments =
2121				(int)FDEV(last_dev).total_segments + segs;
2122		FDEV(last_dev).end_blk =
2123				(long long)FDEV(last_dev).end_blk + blks;
2124#ifdef CONFIG_BLK_DEV_ZONED
2125		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2126					div_u64(blks, sbi->blocks_per_blkz);
2127#endif
2128	}
2129}
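/*
 * Worked example (illustrative, assuming the common layout of 4KiB blocks,
 * 512 blocks per 2MiB segment and one segment per section): growing by
 * secs = 4 gives segs = 4 * SEGS_PER_SEC(sbi) = 4 and
 * blks = SEGS_TO_BLKS(sbi, 4) = 4 * 512 = 2048, i.e. 2048 blocks are added
 * to user_block_count; shrinking passes a negative secs through the same
 * arithmetic.
 */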
2130
2131int f2fs_resize_fs(struct file *filp, __u64 block_count)
2132{
2133	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2134	__u64 old_block_count, shrunk_blocks;
2135	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2136	unsigned int secs;
2137	int err = 0;
2138	__u32 rem;
2139
2140	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2141	if (block_count > old_block_count)
2142		return -EINVAL;
2143
2144	if (f2fs_is_multi_device(sbi)) {
2145		int last_dev = sbi->s_ndevs - 1;
2146		__u64 last_segs = FDEV(last_dev).total_segments;
2147
2148		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
2149								old_block_count)
2150			return -EINVAL;
2151	}
2152
2153	/* new fs size should align to section size */
2154	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2155	if (rem)
2156		return -EINVAL;
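	/*
	 * Illustrative only: a caller that wants to shrink to an unaligned
	 * size gets -EINVAL above, so it must round the target down to a
	 * section boundary first (names hypothetical; blks_per_sec would be
	 * taken from the filesystem metadata):
	 *
	 *	__u64 aligned = (target_blocks / blks_per_sec) * blks_per_sec;
	 */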
2157
2158	if (block_count == old_block_count)
2159		return 0;
2160
2161	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2162		f2fs_err(sbi, "Should run fsck to repair first.");
2163		return -EFSCORRUPTED;
2164	}
2165
2166	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2167		f2fs_err(sbi, "Checkpoint should be enabled.");
2168		return -EINVAL;
2169	}
2170
2171	err = mnt_want_write_file(filp);
2172	if (err)
2173		return err;
2174
2175	shrunk_blocks = old_block_count - block_count;
2176	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2177
2178	/* stop other GC */
2179	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2180		err = -EAGAIN;
2181		goto out_drop_write;
2182	}
2183
2184	/* stop CP to protect MAIN_SEC in free_segment_range */
2185	f2fs_lock_op(sbi);
2186
2187	spin_lock(&sbi->stat_lock);
2188	if (shrunk_blocks + valid_user_blocks(sbi) +
2189		sbi->current_reserved_blocks + sbi->unusable_block_count +
2190		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2191		err = -ENOSPC;
2192	spin_unlock(&sbi->stat_lock);
2193
2194	if (err)
2195		goto out_unlock;
2196
2197	err = free_segment_range(sbi, secs, true);
2198
2199out_unlock:
2200	f2fs_unlock_op(sbi);
2201	f2fs_up_write(&sbi->gc_lock);
2202out_drop_write:
2203	mnt_drop_write_file(filp);
2204	if (err)
2205		return err;
2206
2207	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2208	if (err)
2209		return err;
2210
2211	if (f2fs_readonly(sbi->sb)) {
2212		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2213		if (err)
2214			return err;
2215		return -EROFS;
2216	}
2217
2218	f2fs_down_write(&sbi->gc_lock);
2219	f2fs_down_write(&sbi->cp_global_sem);
2220
2221	spin_lock(&sbi->stat_lock);
2222	if (shrunk_blocks + valid_user_blocks(sbi) +
2223		sbi->current_reserved_blocks + sbi->unusable_block_count +
2224		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2225		err = -ENOSPC;
2226	else
2227		sbi->user_block_count -= shrunk_blocks;
2228	spin_unlock(&sbi->stat_lock);
2229	if (err)
2230		goto out_err;
2231
2232	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2233	err = free_segment_range(sbi, secs, false);
2234	if (err)
2235		goto recover_out;
2236
2237	update_sb_metadata(sbi, -secs);
2238
2239	err = f2fs_commit_super(sbi, false);
2240	if (err) {
2241		update_sb_metadata(sbi, secs);
2242		goto recover_out;
2243	}
2244
2245	update_fs_metadata(sbi, -secs);
2246	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2247	set_sbi_flag(sbi, SBI_IS_DIRTY);
2248
2249	stat_inc_cp_call_count(sbi, TOTAL_CALL);
2250	err = f2fs_write_checkpoint(sbi, &cpc);
2251	if (err) {
2252		update_fs_metadata(sbi, secs);
2253		update_sb_metadata(sbi, secs);
2254		f2fs_commit_super(sbi, false);
2255	}
2256recover_out:
2257	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2258	if (err) {
2259		set_sbi_flag(sbi, SBI_NEED_FSCK);
2260		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2261
2262		spin_lock(&sbi->stat_lock);
2263		sbi->user_block_count += shrunk_blocks;
2264		spin_unlock(&sbi->stat_lock);
2265	}
2266out_err:
2267	f2fs_up_write(&sbi->cp_global_sem);
2268	f2fs_up_write(&sbi->gc_lock);
2269	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2270	return err;
2271}
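/*
 * Illustrative only: f2fs_resize_fs() is reached from userspace through the
 * F2FS_IOC_RESIZE_FS ioctl. Per the check at the top of the function, only
 * shrinking is supported online. A minimal sketch, assuming an f2fs mount
 * at the hypothetical path /mnt/f2fs and a section-aligned target size:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <linux/f2fs.h>
 *
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	__u64 new_block_count = 1 << 20;	// section-aligned, <= current
 *	if (fd >= 0 && ioctl(fd, F2FS_IOC_RESIZE_FS, &new_block_count) < 0)
 *		perror("F2FS_IOC_RESIZE_FS");
 */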