   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/gc.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/fs.h>
   9#include <linux/module.h>
  10#include <linux/backing-dev.h>
  11#include <linux/init.h>
  12#include <linux/f2fs_fs.h>
  13#include <linux/kthread.h>
  14#include <linux/delay.h>
  15#include <linux/freezer.h>
  16#include <linux/sched/signal.h>
  17
  18#include "f2fs.h"
  19#include "node.h"
  20#include "segment.h"
  21#include "gc.h"
  22#include <trace/events/f2fs.h>
  23
  24static unsigned int count_bits(const unsigned long *addr,
  25				unsigned int offset, unsigned int len);
  26
  27static int gc_thread_func(void *data)
  28{
  29	struct f2fs_sb_info *sbi = data;
  30	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
  31	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
  32	unsigned int wait_ms;
  33
  34	wait_ms = gc_th->min_sleep_time;
  35
  36	set_freezable();
  37	do {
  38		bool sync_mode;
  39
  40		wait_event_interruptible_timeout(*wq,
  41				kthread_should_stop() || freezing(current) ||
  42				gc_th->gc_wake,
  43				msecs_to_jiffies(wait_ms));
  44
   45		/* a wake-up grants a single GC attempt */
  46		if (gc_th->gc_wake)
  47			gc_th->gc_wake = 0;
  48
  49		if (try_to_freeze()) {
  50			stat_other_skip_bggc_count(sbi);
  51			continue;
  52		}
  53		if (kthread_should_stop())
  54			break;
  55
  56		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
  57			increase_sleep_time(gc_th, &wait_ms);
  58			stat_other_skip_bggc_count(sbi);
  59			continue;
  60		}
  61
  62		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
  63			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
  64			f2fs_stop_checkpoint(sbi, false);
  65		}
  66
  67		if (!sb_start_write_trylock(sbi->sb)) {
  68			stat_other_skip_bggc_count(sbi);
  69			continue;
  70		}
  71
   72		/*
   73		 * [GC triggering condition]
   74		 * 0. GC is not conducted currently.
   75		 * 1. There are enough dirty segments.
   76		 * 2. The IO subsystem is idle, judged by the # of writeback pages.
   77		 * 3. The IO subsystem is idle, judged by the # of requests in
   78		 *    the bdev's request list.
   79		 *
   80		 * Note: we have to avoid triggering GCs too frequently, because
   81		 * some segments may be invalidated soon afterwards by user
   82		 * updates or deletions, so it is better to wait a while and
   83		 * collect more dirty segments.
   84		 */
  85		if (sbi->gc_mode == GC_URGENT_HIGH) {
  86			wait_ms = gc_th->urgent_sleep_time;
  87			down_write(&sbi->gc_lock);
  88			goto do_gc;
  89		}
  90
  91		if (!down_write_trylock(&sbi->gc_lock)) {
  92			stat_other_skip_bggc_count(sbi);
  93			goto next;
  94		}
  95
  96		if (!is_idle(sbi, GC_TIME)) {
  97			increase_sleep_time(gc_th, &wait_ms);
  98			up_write(&sbi->gc_lock);
  99			stat_io_skip_bggc_count(sbi);
 100			goto next;
 101		}
 102
 103		if (has_enough_invalid_blocks(sbi))
 104			decrease_sleep_time(gc_th, &wait_ms);
 105		else
 106			increase_sleep_time(gc_th, &wait_ms);
 107do_gc:
 108		stat_inc_bggc_count(sbi->stat_info);
 109
 110		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
 111
 112		/* if return value is not zero, no victim was selected */
 113		if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
 114			wait_ms = gc_th->no_gc_sleep_time;
 115
 116		trace_f2fs_background_gc(sbi->sb, wait_ms,
 117				prefree_segments(sbi), free_segments(sbi));
 118
 119		/* balancing f2fs's metadata periodically */
 120		f2fs_balance_fs_bg(sbi, true);
 121next:
 122		sb_end_write(sbi->sb);
 123
 124	} while (!kthread_should_stop());
 125	return 0;
 126}
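
/*
 * A rough sketch of gc_thread_func()'s timing: the thread sleeps
 * wait_ms between passes, starting from min_sleep_time. A frozen or
 * busy filesystem lengthens the next sleep, a pass that sees enough
 * invalid blocks shortens it, a pass that finds no victim backs off
 * to no_gc_sleep_time, and GC_URGENT_HIGH pins the interval at
 * urgent_sleep_time. The DEF_GC_THREAD_*_SLEEP_TIME defaults come
 * from gc.h, and in kernels that expose them the intervals are also
 * tunable at runtime via the gc_*_sleep_time nodes under
 * /sys/fs/f2fs/<disk>/.
 */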
 127
 128int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
 129{
 130	struct f2fs_gc_kthread *gc_th;
 131	dev_t dev = sbi->sb->s_bdev->bd_dev;
 132	int err = 0;
 133
 134	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
 135	if (!gc_th) {
 136		err = -ENOMEM;
 137		goto out;
 138	}
 139
 140	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
 141	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
 142	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
 143	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
 144
  145	gc_th->gc_wake = 0;
 146
 147	sbi->gc_thread = gc_th;
 148	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
 149	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
 150			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 151	if (IS_ERR(gc_th->f2fs_gc_task)) {
 152		err = PTR_ERR(gc_th->f2fs_gc_task);
 153		kvfree(gc_th);
 154		sbi->gc_thread = NULL;
 155	}
 156out:
 157	return err;
 158}
 159
 160void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
 161{
 162	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 163	if (!gc_th)
 164		return;
 165	kthread_stop(gc_th->f2fs_gc_task);
 166	kvfree(gc_th);
 167	sbi->gc_thread = NULL;
 168}
 169
 170static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
 171{
 172	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
 173
 174	switch (sbi->gc_mode) {
 175	case GC_IDLE_CB:
 176		gc_mode = GC_CB;
 177		break;
 178	case GC_IDLE_GREEDY:
 179	case GC_URGENT_HIGH:
 180		gc_mode = GC_GREEDY;
 181		break;
 182	}
 183	return gc_mode;
 184}
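
/*
 * By default, then, background GC uses cost-benefit victim selection
 * (GC_CB) while foreground GC uses greedy selection (GC_GREEDY); a
 * user-configured sbi->gc_mode of GC_IDLE_CB, GC_IDLE_GREEDY or
 * GC_URGENT_HIGH overrides that choice.
 */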
 185
 186static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 187			int type, struct victim_sel_policy *p)
 188{
 189	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 190
 191	if (p->alloc_mode == SSR) {
 192		p->gc_mode = GC_GREEDY;
 193		p->dirty_bitmap = dirty_i->dirty_segmap[type];
 194		p->max_search = dirty_i->nr_dirty[type];
 195		p->ofs_unit = 1;
 196	} else {
 197		p->gc_mode = select_gc_type(sbi, gc_type);
 198		p->ofs_unit = sbi->segs_per_sec;
 199		if (__is_large_section(sbi)) {
 200			p->dirty_bitmap = dirty_i->dirty_secmap;
 201			p->max_search = count_bits(p->dirty_bitmap,
 202						0, MAIN_SECS(sbi));
 203		} else {
 204			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
 205			p->max_search = dirty_i->nr_dirty[DIRTY];
 206		}
 207	}
 208
  209	/*
  210	 * adjust the candidate range; all dirty segments should be
  211	 * selectable in the foreground GC and urgent GC cases.
  212	 */
 213	if (gc_type != FG_GC &&
 214			(sbi->gc_mode != GC_URGENT_HIGH) &&
 215			p->max_search > sbi->max_victim_search)
 216		p->max_search = sbi->max_victim_search;
 217
  218	/* in no_heap mode, let's select the beginning hot/small space first */
 219	if (test_opt(sbi, NOHEAP) &&
 220		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
 221		p->offset = 0;
 222	else
 223		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
 224}
 225
 226static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
 227				struct victim_sel_policy *p)
 228{
 229	/* SSR allocates in a segment unit */
 230	if (p->alloc_mode == SSR)
 231		return sbi->blocks_per_seg;
 232	if (p->gc_mode == GC_GREEDY)
 233		return 2 * sbi->blocks_per_seg * p->ofs_unit;
 234	else if (p->gc_mode == GC_CB)
 235		return UINT_MAX;
 236	else /* No other gc_mode */
 237		return 0;
 238}
 239
 240static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
 241{
 242	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 243	unsigned int secno;
 244
  245	/*
  246	 * If the gc_type is FG_GC, we can reuse victim segments
  247	 * previously selected by background GC.
  248	 * Those segments are guaranteed to have few valid blocks.
  249	 */
 250	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
 251		if (sec_usage_check(sbi, secno))
 252			continue;
 253		clear_bit(secno, dirty_i->victim_secmap);
 254		return GET_SEG_FROM_SEC(sbi, secno);
 255	}
 256	return NULL_SEGNO;
 257}
 258
 259static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 260{
 261	struct sit_info *sit_i = SIT_I(sbi);
 262	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
 263	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
 264	unsigned long long mtime = 0;
 265	unsigned int vblocks;
 266	unsigned char age = 0;
 267	unsigned char u;
 268	unsigned int i;
 269
 270	for (i = 0; i < sbi->segs_per_sec; i++)
 271		mtime += get_seg_entry(sbi, start + i)->mtime;
 272	vblocks = get_valid_blocks(sbi, segno, true);
 273
 274	mtime = div_u64(mtime, sbi->segs_per_sec);
 275	vblocks = div_u64(vblocks, sbi->segs_per_sec);
 276
 277	u = (vblocks * 100) >> sbi->log_blocks_per_seg;
 278
  279	/* Handle the case where the system time was changed by the user */
 280	if (mtime < sit_i->min_mtime)
 281		sit_i->min_mtime = mtime;
 282	if (mtime > sit_i->max_mtime)
 283		sit_i->max_mtime = mtime;
 284	if (sit_i->max_mtime != sit_i->min_mtime)
 285		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
 286				sit_i->max_mtime - sit_i->min_mtime);
 287
 288	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 289}
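
/*
 * For reference, get_cb_cost() is the classic cost-benefit formula:
 * with u the section utilization in percent and age scaled to 0..100,
 * the benefit of cleaning is
 *
 *	benefit = (100 * (100 - u) * age) / (100 + u)
 *
 * and the returned cost is UINT_MAX - benefit, so a smaller return
 * value always marks the better victim. For example, u = 20 and
 * age = 50 yield a benefit of 100 * 80 * 50 / 120 ~= 3333, i.e. a
 * cost of UINT_MAX - 3333.
 */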
 290
 291static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
 292			unsigned int segno, struct victim_sel_policy *p)
 293{
 294	if (p->alloc_mode == SSR)
 295		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
 296
 297	/* alloc_mode == LFS */
 298	if (p->gc_mode == GC_GREEDY)
 299		return get_valid_blocks(sbi, segno, true);
 300	else
 301		return get_cb_cost(sbi, segno);
 302}
 303
 304static unsigned int count_bits(const unsigned long *addr,
 305				unsigned int offset, unsigned int len)
 306{
 307	unsigned int end = offset + len, sum = 0;
 308
 309	while (offset < end) {
 310		if (test_bit(offset++, addr))
 311			++sum;
 312	}
 313	return sum;
 314}
 315
  316/*
  317 * This function is called from two paths:
  318 * garbage collection and SSR segment selection.
  319 * When called during GC, it just picks a victim segment
  320 * and does not remove it from the dirty seglist.
  321 * When called for SSR segment selection, it finds the segment
  322 * with the fewest valid blocks and removes it from the dirty seglist.
  323 */
 324static int get_victim_by_default(struct f2fs_sb_info *sbi,
 325		unsigned int *result, int gc_type, int type, char alloc_mode)
 326{
 327	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 328	struct sit_info *sm = SIT_I(sbi);
 329	struct victim_sel_policy p;
 330	unsigned int secno, last_victim;
 331	unsigned int last_segment;
 332	unsigned int nsearched = 0;
 333	int ret = 0;
 334
 335	mutex_lock(&dirty_i->seglist_lock);
 336	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
 337
 338	p.alloc_mode = alloc_mode;
 339	select_policy(sbi, gc_type, type, &p);
 340
 341	p.min_segno = NULL_SEGNO;
 342	p.min_cost = get_max_cost(sbi, &p);
 343
 344	if (*result != NULL_SEGNO) {
 345		if (!get_valid_blocks(sbi, *result, false)) {
 346			ret = -ENODATA;
 347			goto out;
 348		}
 349
 350		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 351			ret = -EBUSY;
 352		else
 353			p.min_segno = *result;
 354		goto out;
 355	}
 356
 357	ret = -ENODATA;
 358	if (p.max_search == 0)
 359		goto out;
 360
 361	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
 362		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
 363			p.min_segno = sbi->next_victim_seg[BG_GC];
 364			*result = p.min_segno;
 365			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
 366			goto got_result;
 367		}
 368		if (gc_type == FG_GC &&
 369				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
 370			p.min_segno = sbi->next_victim_seg[FG_GC];
 371			*result = p.min_segno;
 372			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 373			goto got_result;
 374		}
 375	}
 376
 377	last_victim = sm->last_victim[p.gc_mode];
 378	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 379		p.min_segno = check_bg_victims(sbi);
 380		if (p.min_segno != NULL_SEGNO)
 381			goto got_it;
 382	}
 383
 384	while (1) {
 385		unsigned long cost, *dirty_bitmap;
 386		unsigned int unit_no, segno;
 387
 388		dirty_bitmap = p.dirty_bitmap;
 389		unit_no = find_next_bit(dirty_bitmap,
 390				last_segment / p.ofs_unit,
 391				p.offset / p.ofs_unit);
 392		segno = unit_no * p.ofs_unit;
 393		if (segno >= last_segment) {
 394			if (sm->last_victim[p.gc_mode]) {
 395				last_segment =
 396					sm->last_victim[p.gc_mode];
 397				sm->last_victim[p.gc_mode] = 0;
 398				p.offset = 0;
 399				continue;
 400			}
 401			break;
 402		}
 403
 404		p.offset = segno + p.ofs_unit;
 405		nsearched++;
 406
 407#ifdef CONFIG_F2FS_CHECK_FS
  408		/*
  409		 * skip an invalid segno (one that previously failed the
  410		 * block validity check during GC) to avoid an endless GC
  411		 * loop in such cases.
  412		 */
 413		if (test_bit(segno, sm->invalid_segmap))
 414			goto next;
 415#endif
 416
 417		secno = GET_SEC_FROM_SEG(sbi, segno);
 418
 419		if (sec_usage_check(sbi, secno))
 420			goto next;
 421		/* Don't touch checkpointed data */
 422		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
 423					get_ckpt_valid_blocks(sbi, segno) &&
 424					p.alloc_mode != SSR))
 425			goto next;
 426		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
 427			goto next;
 428
 429		cost = get_gc_cost(sbi, segno, &p);
 430
 431		if (p.min_cost > cost) {
 432			p.min_segno = segno;
 433			p.min_cost = cost;
 434		}
 435next:
 436		if (nsearched >= p.max_search) {
 437			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
 438				sm->last_victim[p.gc_mode] =
 439					last_victim + p.ofs_unit;
 440			else
 441				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
 442			sm->last_victim[p.gc_mode] %=
 443				(MAIN_SECS(sbi) * sbi->segs_per_sec);
 444			break;
 445		}
 446	}
 447	if (p.min_segno != NULL_SEGNO) {
 448got_it:
 449		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 450got_result:
 451		if (p.alloc_mode == LFS) {
 452			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 453			if (gc_type == FG_GC)
 454				sbi->cur_victim_sec = secno;
 455			else
 456				set_bit(secno, dirty_i->victim_secmap);
 457		}
 458		ret = 0;
 459
 460	}
 461out:
 462	if (p.min_segno != NULL_SEGNO)
 463		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 464				sbi->cur_victim_sec,
 465				prefree_segments(sbi), free_segments(sbi));
 466	mutex_unlock(&dirty_i->seglist_lock);
 467
 468	return ret;
 469}
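
/*
 * The scan above walks the dirty bitmap in ofs_unit-sized steps
 * starting at last_victim[gc_mode], wraps around once to cover the
 * range before the starting offset, and gives up after max_search
 * candidates, storing in last_victim where to resume on the next
 * call.
 */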
 470
 471static const struct victim_selection default_v_ops = {
 472	.get_victim = get_victim_by_default,
 473};
 474
 475static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 476{
 477	struct inode_entry *ie;
 478
 479	ie = radix_tree_lookup(&gc_list->iroot, ino);
 480	if (ie)
 481		return ie->inode;
 482	return NULL;
 483}
 484
 485static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 486{
 487	struct inode_entry *new_ie;
 488
 489	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
 490		iput(inode);
 491		return;
 492	}
 493	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
 494	new_ie->inode = inode;
 495
 496	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
 497	list_add_tail(&new_ie->list, &gc_list->ilist);
 498}
 499
 500static void put_gc_inode(struct gc_inode_list *gc_list)
 501{
 502	struct inode_entry *ie, *next_ie;
 503	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
 504		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
 505		iput(ie->inode);
 506		list_del(&ie->list);
 507		kmem_cache_free(f2fs_inode_entry_slab, ie);
 508	}
 509}
 510
 511static int check_valid_map(struct f2fs_sb_info *sbi,
 512				unsigned int segno, int offset)
 513{
 514	struct sit_info *sit_i = SIT_I(sbi);
 515	struct seg_entry *sentry;
 516	int ret;
 517
 518	down_read(&sit_i->sentry_lock);
 519	sentry = get_seg_entry(sbi, segno);
 520	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
 521	up_read(&sit_i->sentry_lock);
 522	return ret;
 523}
 524
  525/*
  526 * This function compares the node address recorded in the summary with
  527 * the one in the NAT. If they match (the node is valid), the node is
  528 * migrated with cold status; otherwise the invalid node is ignored.
  529 */
 530static int gc_node_segment(struct f2fs_sb_info *sbi,
 531		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 532{
 533	struct f2fs_summary *entry;
 534	block_t start_addr;
 535	int off;
 536	int phase = 0;
 537	bool fggc = (gc_type == FG_GC);
 538	int submitted = 0;
 539
 540	start_addr = START_BLOCK(sbi, segno);
 541
 542next_step:
 543	entry = sum;
 544
 545	if (fggc && phase == 2)
 546		atomic_inc(&sbi->wb_sync_req[NODE]);
 547
 548	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 549		nid_t nid = le32_to_cpu(entry->nid);
 550		struct page *node_page;
 551		struct node_info ni;
 552		int err;
 553
  554		/* stop BG_GC if there are not enough free sections. */
 555		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
 556			return submitted;
 557
 558		if (check_valid_map(sbi, segno, off) == 0)
 559			continue;
 560
 561		if (phase == 0) {
 562			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
 563							META_NAT, true);
 564			continue;
 565		}
 566
 567		if (phase == 1) {
 568			f2fs_ra_node_page(sbi, nid);
 569			continue;
 570		}
 571
 572		/* phase == 2 */
 573		node_page = f2fs_get_node_page(sbi, nid);
 574		if (IS_ERR(node_page))
 575			continue;
 576
 577		/* block may become invalid during f2fs_get_node_page */
 578		if (check_valid_map(sbi, segno, off) == 0) {
 579			f2fs_put_page(node_page, 1);
 580			continue;
 581		}
 582
 583		if (f2fs_get_node_info(sbi, nid, &ni)) {
 584			f2fs_put_page(node_page, 1);
 585			continue;
 586		}
 587
 588		if (ni.blk_addr != start_addr + off) {
 589			f2fs_put_page(node_page, 1);
 590			continue;
 591		}
 592
 593		err = f2fs_move_node_page(node_page, gc_type);
 594		if (!err && gc_type == FG_GC)
 595			submitted++;
 596		stat_inc_node_blk_count(sbi, 1, gc_type);
 597	}
 598
 599	if (++phase < 3)
 600		goto next_step;
 601
 602	if (fggc)
 603		atomic_dec(&sbi->wb_sync_req[NODE]);
 604	return submitted;
 605}
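
/*
 * gc_node_segment() makes three passes over the summary entries:
 * phase 0 readaheads the NAT blocks of every live nid, phase 1
 * readaheads the node pages themselves, and phase 2 revalidates each
 * block and migrates it via f2fs_move_node_page(), so the metadata
 * reads are batched before any page is actually moved.
 */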
 606
  607/*
  608 * Calculate the start block index corresponding to the given node offset.
  609 * Be careful: the caller must pass node offsets that refer only to
  610 * direct node blocks. Passing an offset that points to another node
  611 * block type, such as an indirect or double indirect node block, is a
  612 * caller bug.
  613 */
 614block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 615{
 616	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
 617	unsigned int bidx;
 618
 619	if (node_ofs == 0)
 620		return 0;
 621
 622	if (node_ofs <= 2) {
 623		bidx = node_ofs - 1;
 624	} else if (node_ofs <= indirect_blks) {
 625		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
 626		bidx = node_ofs - 2 - dec;
 627	} else {
 628		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
 629		bidx = node_ofs - 5 - dec;
 630	}
 631	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
 632}
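
/*
 * A worked example for f2fs_start_bidx_of_node(), writing N for
 * NIDS_PER_BLOCK: the direct nodes at node offsets 1 and 2 map to
 * bidx 0 and 1, the first indirect node occupies offset 3, so the
 * direct node at offset 4 maps to bidx 4 - 2 - 0 = 2; the dec terms
 * subtract out the indirect offsets interleaved among the direct
 * ones. The final block index is then
 * bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode).
 */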
 633
 634static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 635		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
 636{
 637	struct page *node_page;
 638	nid_t nid;
 639	unsigned int ofs_in_node;
 640	block_t source_blkaddr;
 641
 642	nid = le32_to_cpu(sum->nid);
 643	ofs_in_node = le16_to_cpu(sum->ofs_in_node);
 644
 645	node_page = f2fs_get_node_page(sbi, nid);
 646	if (IS_ERR(node_page))
 647		return false;
 648
 649	if (f2fs_get_node_info(sbi, nid, dni)) {
 650		f2fs_put_page(node_page, 1);
 651		return false;
 652	}
 653
 654	if (sum->version != dni->version) {
 655		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
 656			  __func__);
 657		set_sbi_flag(sbi, SBI_NEED_FSCK);
 658	}
 659
 660	*nofs = ofs_of_node(node_page);
 661	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
 662	f2fs_put_page(node_page, 1);
 663
 664	if (source_blkaddr != blkaddr) {
 665#ifdef CONFIG_F2FS_CHECK_FS
 666		unsigned int segno = GET_SEGNO(sbi, blkaddr);
 667		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 668
 669		if (unlikely(check_valid_map(sbi, segno, offset))) {
 670			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
 671				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
 672						blkaddr, source_blkaddr, segno);
 673				f2fs_bug_on(sbi, 1);
 674			}
 675		}
 676#endif
 677		return false;
 678	}
 679	return true;
 680}
 681
 682static int ra_data_block(struct inode *inode, pgoff_t index)
 683{
 684	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 685	struct address_space *mapping = inode->i_mapping;
 686	struct dnode_of_data dn;
 687	struct page *page;
 688	struct extent_info ei = {0, 0, 0};
 689	struct f2fs_io_info fio = {
 690		.sbi = sbi,
 691		.ino = inode->i_ino,
 692		.type = DATA,
 693		.temp = COLD,
 694		.op = REQ_OP_READ,
 695		.op_flags = 0,
 696		.encrypted_page = NULL,
 697		.in_list = false,
 698		.retry = false,
 699	};
 700	int err;
 701
 702	page = f2fs_grab_cache_page(mapping, index, true);
 703	if (!page)
 704		return -ENOMEM;
 705
 706	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
 707		dn.data_blkaddr = ei.blk + index - ei.fofs;
 708		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
 709						DATA_GENERIC_ENHANCE_READ))) {
 710			err = -EFSCORRUPTED;
 711			goto put_page;
 712		}
 713		goto got_it;
 714	}
 715
 716	set_new_dnode(&dn, inode, NULL, NULL, 0);
 717	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 718	if (err)
 719		goto put_page;
 720	f2fs_put_dnode(&dn);
 721
 722	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
 723		err = -ENOENT;
 724		goto put_page;
 725	}
 726	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
 727						DATA_GENERIC_ENHANCE))) {
 728		err = -EFSCORRUPTED;
 729		goto put_page;
 730	}
 731got_it:
 732	/* read page */
 733	fio.page = page;
 734	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 735
  736	/*
  737	 * don't cache encrypted data in the meta inode until the previous
  738	 * dirty data has been written back, to avoid racing GC with flush.
  739	 */
 740	f2fs_wait_on_page_writeback(page, DATA, true, true);
 741
 742	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 743
 744	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
 745					dn.data_blkaddr,
 746					FGP_LOCK | FGP_CREAT, GFP_NOFS);
 747	if (!fio.encrypted_page) {
 748		err = -ENOMEM;
 749		goto put_page;
 750	}
 751
 752	err = f2fs_submit_page_bio(&fio);
 753	if (err)
 754		goto put_encrypted_page;
 755	f2fs_put_page(fio.encrypted_page, 0);
 756	f2fs_put_page(page, 1);
 757
 758	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
 759	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
 760
 761	return 0;
 762put_encrypted_page:
 763	f2fs_put_page(fio.encrypted_page, 1);
 764put_page:
 765	f2fs_put_page(page, 1);
 766	return err;
 767}
 768
  769/*
  770 * Move a data block via META_MAPPING while keeping the data page locked.
  771 * This can be used to move blocks, aka LBAs, directly on disk.
  772 */
 773static int move_data_block(struct inode *inode, block_t bidx,
 774				int gc_type, unsigned int segno, int off)
 775{
 776	struct f2fs_io_info fio = {
 777		.sbi = F2FS_I_SB(inode),
 778		.ino = inode->i_ino,
 779		.type = DATA,
 780		.temp = COLD,
 781		.op = REQ_OP_READ,
 782		.op_flags = 0,
 783		.encrypted_page = NULL,
 784		.in_list = false,
 785		.retry = false,
 786	};
 787	struct dnode_of_data dn;
 788	struct f2fs_summary sum;
 789	struct node_info ni;
 790	struct page *page, *mpage;
 791	block_t newaddr;
 792	int err = 0;
 793	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
 794
 795	/* do not read out */
 796	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
 797	if (!page)
 798		return -ENOMEM;
 799
 800	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
 801		err = -ENOENT;
 802		goto out;
 803	}
 804
 805	if (f2fs_is_atomic_file(inode)) {
 806		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
 807		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
 808		err = -EAGAIN;
 809		goto out;
 810	}
 811
 812	if (f2fs_is_pinned_file(inode)) {
 813		f2fs_pin_file_control(inode, true);
 814		err = -EAGAIN;
 815		goto out;
 816	}
 817
 818	set_new_dnode(&dn, inode, NULL, NULL, 0);
 819	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
 820	if (err)
 821		goto out;
 822
 823	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
 824		ClearPageUptodate(page);
 825		err = -ENOENT;
 826		goto put_out;
 827	}
 828
  829	/*
  830	 * don't cache encrypted data in the meta inode until the previous
  831	 * dirty data has been written back, to avoid racing GC with flush.
  832	 */
 833	f2fs_wait_on_page_writeback(page, DATA, true, true);
 834
 835	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 836
 837	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
 838	if (err)
 839		goto put_out;
 840
 841	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
 842
 843	/* read page */
 844	fio.page = page;
 845	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 846
 847	if (lfs_mode)
 848		down_write(&fio.sbi->io_order_lock);
 849
 850	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
 851					fio.old_blkaddr, false);
 852	if (!mpage) {
 853		err = -ENOMEM;
 854		goto up_out;
 855	}
 856
 857	fio.encrypted_page = mpage;
 858
 859	/* read source block in mpage */
 860	if (!PageUptodate(mpage)) {
 861		err = f2fs_submit_page_bio(&fio);
 862		if (err) {
 863			f2fs_put_page(mpage, 1);
 864			goto up_out;
 865		}
 866
 867		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
 868		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
 869
 870		lock_page(mpage);
 871		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
 872						!PageUptodate(mpage))) {
 873			err = -EIO;
 874			f2fs_put_page(mpage, 1);
 875			goto up_out;
 876		}
 877	}
 878
 879	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
 880					&sum, CURSEG_COLD_DATA, NULL);
 881
 882	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
 883				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
 884	if (!fio.encrypted_page) {
 885		err = -ENOMEM;
 886		f2fs_put_page(mpage, 1);
 887		goto recover_block;
 888	}
 889
 890	/* write target block */
 891	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
 892	memcpy(page_address(fio.encrypted_page),
 893				page_address(mpage), PAGE_SIZE);
 894	f2fs_put_page(mpage, 1);
 895	invalidate_mapping_pages(META_MAPPING(fio.sbi),
 896				fio.old_blkaddr, fio.old_blkaddr);
 897
 898	set_page_dirty(fio.encrypted_page);
 899	if (clear_page_dirty_for_io(fio.encrypted_page))
 900		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 901
 902	set_page_writeback(fio.encrypted_page);
 903	ClearPageError(page);
 904
 905	/* allocate block address */
 906	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 907
 908	fio.op = REQ_OP_WRITE;
 909	fio.op_flags = REQ_SYNC;
 910	fio.new_blkaddr = newaddr;
 911	f2fs_submit_page_write(&fio);
 912	if (fio.retry) {
 913		err = -EAGAIN;
 914		if (PageWriteback(fio.encrypted_page))
 915			end_page_writeback(fio.encrypted_page);
 916		goto put_page_out;
 917	}
 918
 919	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
 920
 921	f2fs_update_data_blkaddr(&dn, newaddr);
 922	set_inode_flag(inode, FI_APPEND_WRITE);
 923	if (page->index == 0)
 924		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
 925put_page_out:
 926	f2fs_put_page(fio.encrypted_page, 1);
 927recover_block:
 928	if (err)
 929		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
 930								true, true);
 931up_out:
 932	if (lfs_mode)
 933		up_write(&fio.sbi->io_order_lock);
 934put_out:
 935	f2fs_put_dnode(&dn);
 936out:
 937	f2fs_put_page(page, 1);
 938	return err;
 939}
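
/*
 * In short, move_data_block() migrates one block without decrypting
 * it: the source block is read into a META_MAPPING page, a new block
 * address is allocated with f2fs_allocate_data_block(), the raw data
 * is memcpy()ed into a page cached at the new address and written
 * out, and finally the dnode is repointed with
 * f2fs_update_data_blkaddr(). If anything fails after the allocation,
 * f2fs_do_replace_block() rolls the new address back.
 */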
 940
 941static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
 942							unsigned int segno, int off)
 943{
 944	struct page *page;
 945	int err = 0;
 946
 947	page = f2fs_get_lock_data_page(inode, bidx, true);
 948	if (IS_ERR(page))
 949		return PTR_ERR(page);
 950
 951	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
 952		err = -ENOENT;
 953		goto out;
 954	}
 955
 956	if (f2fs_is_atomic_file(inode)) {
 957		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
 958		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
 959		err = -EAGAIN;
 960		goto out;
 961	}
 962	if (f2fs_is_pinned_file(inode)) {
 963		if (gc_type == FG_GC)
 964			f2fs_pin_file_control(inode, true);
 965		err = -EAGAIN;
 966		goto out;
 967	}
 968
 969	if (gc_type == BG_GC) {
 970		if (PageWriteback(page)) {
 971			err = -EAGAIN;
 972			goto out;
 973		}
 974		set_page_dirty(page);
 975		set_cold_data(page);
 976	} else {
 977		struct f2fs_io_info fio = {
 978			.sbi = F2FS_I_SB(inode),
 979			.ino = inode->i_ino,
 980			.type = DATA,
 981			.temp = COLD,
 982			.op = REQ_OP_WRITE,
 983			.op_flags = REQ_SYNC,
 984			.old_blkaddr = NULL_ADDR,
 985			.page = page,
 986			.encrypted_page = NULL,
 987			.need_lock = LOCK_REQ,
 988			.io_type = FS_GC_DATA_IO,
 989		};
 990		bool is_dirty = PageDirty(page);
 991
 992retry:
 993		f2fs_wait_on_page_writeback(page, DATA, true, true);
 994
 995		set_page_dirty(page);
 996		if (clear_page_dirty_for_io(page)) {
 997			inode_dec_dirty_pages(inode);
 998			f2fs_remove_dirty_inode(inode);
 999		}
1000
1001		set_cold_data(page);
1002
1003		err = f2fs_do_write_data_page(&fio);
1004		if (err) {
1005			clear_cold_data(page);
1006			if (err == -ENOMEM) {
1007				congestion_wait(BLK_RW_ASYNC,
1008						DEFAULT_IO_TIMEOUT);
1009				goto retry;
1010			}
1011			if (is_dirty)
1012				set_page_dirty(page);
1013		}
1014	}
1015out:
1016	f2fs_put_page(page, 1);
1017	return err;
1018}
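
/*
 * move_data_page() is the counterpart used when no post-read
 * processing (e.g. encryption) is required: background GC merely
 * redirties the page and tags it cold so writeback relocates it
 * later, while foreground GC writes it out synchronously through
 * f2fs_do_write_data_page(), retrying on -ENOMEM.
 */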
1019
 1020/*
 1021 * This function tries to get the parent node of a victim data block and
 1022 * checks the block's validity. If the block is valid, it is copied with
 1023 * cold status and the parent node is updated.
 1024 * If the parent node is not valid or the data block address differs,
 1025 * the victim data block is ignored.
 1026 */
1027static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1028		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
1029{
1030	struct super_block *sb = sbi->sb;
1031	struct f2fs_summary *entry;
1032	block_t start_addr;
1033	int off;
1034	int phase = 0;
1035	int submitted = 0;
1036
1037	start_addr = START_BLOCK(sbi, segno);
1038
1039next_step:
1040	entry = sum;
1041
1042	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
1043		struct page *data_page;
1044		struct inode *inode;
1045		struct node_info dni; /* dnode info for the data */
1046		unsigned int ofs_in_node, nofs;
1047		block_t start_bidx;
1048		nid_t nid = le32_to_cpu(entry->nid);
1049
 1050		/*
 1051		 * stop BG_GC if there are not enough free sections.
 1052		 * Also, stop GC if the segment has become fully valid due to
 1053		 * a race with SSR block allocation.
 1054		 */
1055		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1056				get_valid_blocks(sbi, segno, true) ==
1057							BLKS_PER_SEC(sbi))
1058			return submitted;
1059
1060		if (check_valid_map(sbi, segno, off) == 0)
1061			continue;
1062
1063		if (phase == 0) {
1064			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1065							META_NAT, true);
1066			continue;
1067		}
1068
1069		if (phase == 1) {
1070			f2fs_ra_node_page(sbi, nid);
1071			continue;
1072		}
1073
 1074		/* Get an inode by ino and check its validity */
1075		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1076			continue;
1077
1078		if (phase == 2) {
1079			f2fs_ra_node_page(sbi, dni.ino);
1080			continue;
1081		}
1082
1083		ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1084
1085		if (phase == 3) {
1086			inode = f2fs_iget(sb, dni.ino);
1087			if (IS_ERR(inode) || is_bad_inode(inode)) {
1088				set_sbi_flag(sbi, SBI_NEED_FSCK);
1089				continue;
1090			}
1091
1092			if (!down_write_trylock(
1093				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1094				iput(inode);
1095				sbi->skipped_gc_rwsem++;
1096				continue;
1097			}
1098
1099			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1100								ofs_in_node;
1101
1102			if (f2fs_post_read_required(inode)) {
1103				int err = ra_data_block(inode, start_bidx);
1104
1105				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1106				if (err) {
1107					iput(inode);
1108					continue;
1109				}
1110				add_gc_inode(gc_list, inode);
1111				continue;
1112			}
1113
1114			data_page = f2fs_get_read_data_page(inode,
1115						start_bidx, REQ_RAHEAD, true);
1116			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1117			if (IS_ERR(data_page)) {
1118				iput(inode);
1119				continue;
1120			}
1121
1122			f2fs_put_page(data_page, 0);
1123			add_gc_inode(gc_list, inode);
1124			continue;
1125		}
1126
1127		/* phase 4 */
1128		inode = find_gc_inode(gc_list, dni.ino);
1129		if (inode) {
1130			struct f2fs_inode_info *fi = F2FS_I(inode);
1131			bool locked = false;
1132			int err;
1133
1134			if (S_ISREG(inode->i_mode)) {
1135				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
1136					continue;
1137				if (!down_write_trylock(
1138						&fi->i_gc_rwsem[WRITE])) {
1139					sbi->skipped_gc_rwsem++;
1140					up_write(&fi->i_gc_rwsem[READ]);
1141					continue;
1142				}
1143				locked = true;
1144
 1145				/* wait for all in-flight AIO data */
1146				inode_dio_wait(inode);
1147			}
1148
1149			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1150								+ ofs_in_node;
1151			if (f2fs_post_read_required(inode))
1152				err = move_data_block(inode, start_bidx,
1153							gc_type, segno, off);
1154			else
1155				err = move_data_page(inode, start_bidx, gc_type,
1156								segno, off);
1157
1158			if (!err && (gc_type == FG_GC ||
1159					f2fs_post_read_required(inode)))
1160				submitted++;
1161
1162			if (locked) {
1163				up_write(&fi->i_gc_rwsem[WRITE]);
1164				up_write(&fi->i_gc_rwsem[READ]);
1165			}
1166
1167			stat_inc_data_blk_count(sbi, 1, gc_type);
1168		}
1169	}
1170
1171	if (++phase < 5)
1172		goto next_step;
1173
1174	return submitted;
1175}
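
/*
 * gc_data_segment() mirrors the node path but with five phases:
 * phase 0 readaheads NAT blocks, phase 1 readaheads the dnodes,
 * phase 2 readaheads the owning inodes' node pages, phase 3 igets
 * each inode and readaheads (or, for post-read inodes, pre-reads)
 * the data, and phase 4 performs the actual move under the inode's
 * i_gc_rwsem.
 */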
1176
1177static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1178			int gc_type)
1179{
1180	struct sit_info *sit_i = SIT_I(sbi);
1181	int ret;
1182
1183	down_write(&sit_i->sentry_lock);
1184	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
1185					      NO_CHECK_TYPE, LFS);
1186	up_write(&sit_i->sentry_lock);
1187	return ret;
1188}
1189
1190static int do_garbage_collect(struct f2fs_sb_info *sbi,
1191				unsigned int start_segno,
1192				struct gc_inode_list *gc_list, int gc_type)
1193{
1194	struct page *sum_page;
1195	struct f2fs_summary_block *sum;
1196	struct blk_plug plug;
1197	unsigned int segno = start_segno;
1198	unsigned int end_segno = start_segno + sbi->segs_per_sec;
1199	int seg_freed = 0, migrated = 0;
1200	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1201						SUM_TYPE_DATA : SUM_TYPE_NODE;
1202	int submitted = 0;
1203
1204	if (__is_large_section(sbi))
1205		end_segno = rounddown(end_segno, sbi->segs_per_sec);
1206
 1207	/* readahead multiple SSA blocks that have contiguous addresses */
1208	if (__is_large_section(sbi))
1209		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1210					end_segno - segno, META_SSA, true);
1211
 1212	/* reference all summary pages */
1213	while (segno < end_segno) {
1214		sum_page = f2fs_get_sum_page(sbi, segno++);
1215		if (IS_ERR(sum_page)) {
1216			int err = PTR_ERR(sum_page);
1217
1218			end_segno = segno - 1;
1219			for (segno = start_segno; segno < end_segno; segno++) {
1220				sum_page = find_get_page(META_MAPPING(sbi),
1221						GET_SUM_BLOCK(sbi, segno));
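				/*
				 * drop both the reference taken by
				 * find_get_page() above and the one still
				 * held from f2fs_get_sum_page()
				 */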
1222				f2fs_put_page(sum_page, 0);
1223				f2fs_put_page(sum_page, 0);
1224			}
1225			return err;
1226		}
1227		unlock_page(sum_page);
1228	}
1229
1230	blk_start_plug(&plug);
1231
1232	for (segno = start_segno; segno < end_segno; segno++) {
1233
1234		/* find segment summary of victim */
1235		sum_page = find_get_page(META_MAPPING(sbi),
1236					GET_SUM_BLOCK(sbi, segno));
1237		f2fs_put_page(sum_page, 0);
1238
1239		if (get_valid_blocks(sbi, segno, false) == 0)
1240			goto freed;
1241		if (gc_type == BG_GC && __is_large_section(sbi) &&
1242				migrated >= sbi->migration_granularity)
1243			goto skip;
1244		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1245			goto skip;
1246
1247		sum = page_address(sum_page);
1248		if (type != GET_SUM_TYPE((&sum->footer))) {
1249			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1250				 segno, type, GET_SUM_TYPE((&sum->footer)));
1251			set_sbi_flag(sbi, SBI_NEED_FSCK);
1252			f2fs_stop_checkpoint(sbi, false);
1253			goto skip;
1254		}
1255
1256		/*
1257		 * this is to avoid deadlock:
1258		 * - lock_page(sum_page)         - f2fs_replace_block
1259		 *  - check_valid_map()            - down_write(sentry_lock)
1260		 *   - down_read(sentry_lock)     - change_curseg()
1261		 *                                  - lock_page(sum_page)
1262		 */
1263		if (type == SUM_TYPE_NODE)
1264			submitted += gc_node_segment(sbi, sum->entries, segno,
1265								gc_type);
1266		else
1267			submitted += gc_data_segment(sbi, sum->entries, gc_list,
1268							segno, gc_type);
1269
1270		stat_inc_seg_count(sbi, type, gc_type);
1271		migrated++;
1272
1273freed:
1274		if (gc_type == FG_GC &&
1275				get_valid_blocks(sbi, segno, false) == 0)
1276			seg_freed++;
1277
1278		if (__is_large_section(sbi) && segno + 1 < end_segno)
1279			sbi->next_victim_seg[gc_type] = segno + 1;
1280skip:
1281		f2fs_put_page(sum_page, 0);
1282	}
1283
1284	if (submitted)
1285		f2fs_submit_merged_write(sbi,
1286				(type == SUM_TYPE_NODE) ? NODE : DATA);
1287
1288	blk_finish_plug(&plug);
1289
1290	stat_inc_call_count(sbi->stat_info);
1291
1292	return seg_freed;
1293}
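
/*
 * do_garbage_collect() operates on a whole section: it references all
 * summary pages up front (unwinding those references on error), then
 * migrates segment by segment, counting a segment as freed only when
 * foreground GC leaves it with zero valid blocks. In large-section
 * mode, background GC also honours sbi->migration_granularity and
 * records the resume point in sbi->next_victim_seg[].
 */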
1294
1295int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
1296			bool background, unsigned int segno)
1297{
1298	int gc_type = sync ? FG_GC : BG_GC;
1299	int sec_freed = 0, seg_freed = 0, total_freed = 0;
1300	int ret = 0;
1301	struct cp_control cpc;
1302	unsigned int init_segno = segno;
1303	struct gc_inode_list gc_list = {
1304		.ilist = LIST_HEAD_INIT(gc_list.ilist),
1305		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1306	};
1307	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
1308	unsigned long long first_skipped;
1309	unsigned int skipped_round = 0, round = 0;
1310
1311	trace_f2fs_gc_begin(sbi->sb, sync, background,
1312				get_pages(sbi, F2FS_DIRTY_NODES),
1313				get_pages(sbi, F2FS_DIRTY_DENTS),
1314				get_pages(sbi, F2FS_DIRTY_IMETA),
1315				free_sections(sbi),
1316				free_segments(sbi),
1317				reserved_segments(sbi),
1318				prefree_segments(sbi));
1319
1320	cpc.reason = __get_cp_reason(sbi);
1321	sbi->skipped_gc_rwsem = 0;
1322	first_skipped = last_skipped;
1323gc_more:
1324	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1325		ret = -EINVAL;
1326		goto stop;
1327	}
1328	if (unlikely(f2fs_cp_error(sbi))) {
1329		ret = -EIO;
1330		goto stop;
1331	}
1332
1333	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
 1334		/*
 1335		 * For example, if there are many prefree segments below the given
 1336		 * threshold, we can free them with a checkpoint. That secures
 1337		 * free segments without needing FG_GC any more.
 1338		 */
1339		if (prefree_segments(sbi) &&
1340				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1341			ret = f2fs_write_checkpoint(sbi, &cpc);
1342			if (ret)
1343				goto stop;
1344		}
1345		if (has_not_enough_free_secs(sbi, 0, 0))
1346			gc_type = FG_GC;
1347	}
1348
1349	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1350	if (gc_type == BG_GC && !background) {
1351		ret = -EINVAL;
1352		goto stop;
1353	}
1354	ret = __get_victim(sbi, &segno, gc_type);
1355	if (ret)
1356		goto stop;
1357
1358	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
1359	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
1360		sec_freed++;
1361	total_freed += seg_freed;
1362
1363	if (gc_type == FG_GC) {
1364		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
1365						sbi->skipped_gc_rwsem)
1366			skipped_round++;
1367		last_skipped = sbi->skipped_atomic_files[FG_GC];
1368		round++;
1369	}
1370
1371	if (gc_type == FG_GC && seg_freed)
1372		sbi->cur_victim_sec = NULL_SEGNO;
1373
1374	if (sync)
1375		goto stop;
1376
1377	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
1378		if (skipped_round <= MAX_SKIP_GC_COUNT ||
1379					skipped_round * 2 < round) {
1380			segno = NULL_SEGNO;
1381			goto gc_more;
1382		}
1383
1384		if (first_skipped < last_skipped &&
1385				(last_skipped - first_skipped) >
1386						sbi->skipped_gc_rwsem) {
1387			f2fs_drop_inmem_pages_all(sbi, true);
1388			segno = NULL_SEGNO;
1389			goto gc_more;
1390		}
1391		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1392			ret = f2fs_write_checkpoint(sbi, &cpc);
1393	}
1394stop:
1395	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1396	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
1397
1398	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1399				get_pages(sbi, F2FS_DIRTY_NODES),
1400				get_pages(sbi, F2FS_DIRTY_DENTS),
1401				get_pages(sbi, F2FS_DIRTY_IMETA),
1402				free_sections(sbi),
1403				free_segments(sbi),
1404				reserved_segments(sbi),
1405				prefree_segments(sbi));
1406
1407	up_write(&sbi->gc_lock);
1408
1409	put_gc_inode(&gc_list);
1410
1411	if (sync && !ret)
1412		ret = sec_freed ? 0 : -EAGAIN;
1413	return ret;
1414}
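
/*
 * The gc_more loop above keeps collecting until enough sections are
 * free. Rounds that only skipped blocks (atomic files or a contended
 * i_gc_rwsem) are counted, and once skipped_round exceeds
 * MAX_SKIP_GC_COUNT and accounts for at least half the rounds, the
 * in-memory atomic pages are dropped (when atomic-file skips
 * dominate) before falling back to a checkpoint. Note that the
 * caller must hold sbi->gc_lock, which is released here on exit.
 */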
1415
1416void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1417{
1418	DIRTY_I(sbi)->v_ops = &default_v_ops;
1419
1420	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1421
 1422	/* place the warm/cold data area on the slower device */
1423	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1424		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1425				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1426}
1427
1428static int free_segment_range(struct f2fs_sb_info *sbi,
1429				unsigned int secs, bool gc_only)
1430{
1431	unsigned int segno, next_inuse, start, end;
1432	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1433	int gc_mode, gc_type;
1434	int err = 0;
1435	int type;
1436
1437	/* Force block allocation for GC */
1438	MAIN_SECS(sbi) -= secs;
1439	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1440	end = MAIN_SEGS(sbi) - 1;
1441
1442	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1443	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1444		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1445			SIT_I(sbi)->last_victim[gc_mode] = 0;
1446
1447	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1448		if (sbi->next_victim_seg[gc_type] >= start)
1449			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1450	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
1451
1452	/* Move out cursegs from the target range */
1453	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
1454		f2fs_allocate_segment_for_resize(sbi, type, start, end);
1455
1456	/* do GC to move out valid blocks in the range */
1457	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1458		struct gc_inode_list gc_list = {
1459			.ilist = LIST_HEAD_INIT(gc_list.ilist),
1460			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1461		};
1462
1463		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
1464		put_gc_inode(&gc_list);
1465
1466		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1467			err = -EAGAIN;
1468			goto out;
1469		}
1470		if (fatal_signal_pending(current)) {
1471			err = -ERESTARTSYS;
1472			goto out;
1473		}
1474	}
1475	if (gc_only)
1476		goto out;
1477
1478	err = f2fs_write_checkpoint(sbi, &cpc);
1479	if (err)
1480		goto out;
1481
1482	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1483	if (next_inuse <= end) {
1484		f2fs_err(sbi, "segno %u should be free but still inuse!",
1485			 next_inuse);
1486		f2fs_bug_on(sbi, 1);
1487	}
1488out:
1489	MAIN_SECS(sbi) += secs;
1490	return err;
1491}
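
/*
 * free_segment_range() is the workhorse of shrinking: it temporarily
 * hides the last `secs` sections from the allocator by reducing
 * MAIN_SECS(), moves the current segments out of that range, then
 * foreground-GCs every section in it. With gc_only set it stops
 * after the GC pass; otherwise it also checkpoints and verifies that
 * the range really ended up free.
 */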
1492
1493static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1494{
1495	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
1496	int section_count;
1497	int segment_count;
1498	int segment_count_main;
1499	long long block_count;
1500	int segs = secs * sbi->segs_per_sec;
1501
1502	down_write(&sbi->sb_lock);
1503
1504	section_count = le32_to_cpu(raw_sb->section_count);
1505	segment_count = le32_to_cpu(raw_sb->segment_count);
1506	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
1507	block_count = le64_to_cpu(raw_sb->block_count);
1508
1509	raw_sb->section_count = cpu_to_le32(section_count + secs);
1510	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
1511	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
1512	raw_sb->block_count = cpu_to_le64(block_count +
1513					(long long)segs * sbi->blocks_per_seg);
1514	if (f2fs_is_multi_device(sbi)) {
1515		int last_dev = sbi->s_ndevs - 1;
1516		int dev_segs =
1517			le32_to_cpu(raw_sb->devs[last_dev].total_segments);
1518
1519		raw_sb->devs[last_dev].total_segments =
1520						cpu_to_le32(dev_segs + segs);
1521	}
1522
1523	up_write(&sbi->sb_lock);
1524}
1525
1526static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
1527{
1528	int segs = secs * sbi->segs_per_sec;
1529	long long blks = (long long)segs * sbi->blocks_per_seg;
1530	long long user_block_count =
1531				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
1532
1533	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
1534	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
1535	MAIN_SECS(sbi) += secs;
1536	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
1537	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
1538	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
1539
1540	if (f2fs_is_multi_device(sbi)) {
1541		int last_dev = sbi->s_ndevs - 1;
1542
1543		FDEV(last_dev).total_segments =
1544				(int)FDEV(last_dev).total_segments + segs;
1545		FDEV(last_dev).end_blk =
1546				(long long)FDEV(last_dev).end_blk + blks;
1547#ifdef CONFIG_BLK_DEV_ZONED
1548		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
1549					(int)(blks >> sbi->log_blocks_per_blkz);
1550#endif
1551	}
1552}
1553
1554int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
1555{
1556	__u64 old_block_count, shrunk_blocks;
1557	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1558	unsigned int secs;
1559	int err = 0;
1560	__u32 rem;
1561
1562	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
1563	if (block_count > old_block_count)
1564		return -EINVAL;
1565
1566	if (f2fs_is_multi_device(sbi)) {
1567		int last_dev = sbi->s_ndevs - 1;
1568		__u64 last_segs = FDEV(last_dev).total_segments;
1569
1570		if (block_count + last_segs * sbi->blocks_per_seg <=
1571								old_block_count)
1572			return -EINVAL;
1573	}
1574
 1575	/* the new fs size should be aligned to the section size */
1576	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
1577	if (rem)
1578		return -EINVAL;
1579
1580	if (block_count == old_block_count)
1581		return 0;
1582
1583	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1584		f2fs_err(sbi, "Should run fsck to repair first.");
1585		return -EFSCORRUPTED;
1586	}
1587
1588	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
1589		f2fs_err(sbi, "Checkpoint should be enabled.");
1590		return -EINVAL;
1591	}
1592
1593	shrunk_blocks = old_block_count - block_count;
1594	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
1595
1596	/* stop other GC */
1597	if (!down_write_trylock(&sbi->gc_lock))
1598		return -EAGAIN;
1599
1600	/* stop CP to protect MAIN_SEC in free_segment_range */
1601	f2fs_lock_op(sbi);
1602	err = free_segment_range(sbi, secs, true);
1603	f2fs_unlock_op(sbi);
1604	up_write(&sbi->gc_lock);
1605	if (err)
1606		return err;
1607
1608	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
1609
1610	freeze_super(sbi->sb);
1611	down_write(&sbi->gc_lock);
1612	mutex_lock(&sbi->cp_mutex);
1613
1614	spin_lock(&sbi->stat_lock);
1615	if (shrunk_blocks + valid_user_blocks(sbi) +
1616		sbi->current_reserved_blocks + sbi->unusable_block_count +
1617		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
1618		err = -ENOSPC;
1619	else
1620		sbi->user_block_count -= shrunk_blocks;
1621	spin_unlock(&sbi->stat_lock);
1622	if (err)
1623		goto out_err;
1624
1625	err = free_segment_range(sbi, secs, false);
1626	if (err)
1627		goto recover_out;
1628
1629	update_sb_metadata(sbi, -secs);
1630
1631	err = f2fs_commit_super(sbi, false);
1632	if (err) {
1633		update_sb_metadata(sbi, secs);
1634		goto recover_out;
1635	}
1636
1637	update_fs_metadata(sbi, -secs);
1638	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
1639	set_sbi_flag(sbi, SBI_IS_DIRTY);
1640
1641	err = f2fs_write_checkpoint(sbi, &cpc);
1642	if (err) {
1643		update_fs_metadata(sbi, secs);
1644		update_sb_metadata(sbi, secs);
1645		f2fs_commit_super(sbi, false);
1646	}
1647recover_out:
1648	if (err) {
1649		set_sbi_flag(sbi, SBI_NEED_FSCK);
1650		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
1651
1652		spin_lock(&sbi->stat_lock);
1653		sbi->user_block_count += shrunk_blocks;
1654		spin_unlock(&sbi->stat_lock);
1655	}
1656out_err:
1657	mutex_unlock(&sbi->cp_mutex);
1658	up_write(&sbi->gc_lock);
1659	thaw_super(sbi->sb);
1660	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
1661	return err;
1662}