// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *revoke_entry_slab;

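/*
 * Read BITS_PER_LONG/8 bytes MSB-first into one unsigned long, so that a
 * bitmap written in f2fs' reversed bit order can be scanned with the plain
 * bit helpers below. E.g. on 64-bit, str[0] == 0x80 (f2fs bit 0 set)
 * yields 0x8000000000000000UL.
 */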
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
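/*
 * It returns the bit offset counted from the MSB side, e.g.
 * __reverse_ffs(0x8000000000000000UL) == 0 on 64-bit, and
 * __reverse_ffs(1UL) == BITS_PER_LONG - 1.
 */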
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

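/*
 * SSR (Slack Space Recycling) reuses invalid blocks inside dirty segments
 * instead of allocating fresh ones. It is wanted whenever the free sections
 * would not cover the currently dirty node/dentry/imeta data plus the
 * reserved margin, and always under urgent GC or disabled checkpointing.
 */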
bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}

void f2fs_abort_atomic_write(struct inode *inode, bool clean)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_is_atomic_file(inode))
		return;

	if (clean)
		truncate_inode_pages_final(inode->i_mapping);

	release_atomic_write_cnt(inode);
	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
	clear_inode_flag(inode, FI_ATOMIC_FILE);
	stat_dec_atomic_inode(inode);

	F2FS_I(inode)->atomic_write_task = NULL;

	if (clean) {
		f2fs_i_size_write(inode, fi->original_i_size);
		fi->original_i_size = 0;
	}
	/* avoid stale dirty inode during eviction */
	sync_inode_metadata(inode, 0);
}

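/*
 * Move one block of an atomic write from the COW inode into the original
 * file: @new_addr replaces the current block address at @index, and the old
 * address is reported back through @old_addr so the caller can undo the
 * swap later. With @recover set, the same helper plays the revoke path and
 * puts a saved address back in place.
 */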
static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
			block_t new_addr, block_t *old_addr, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct node_info ni;
	int err;

retry:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
			goto retry;
		}
		return err;
	}

	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	if (recover) {
		/* dn.data_blkaddr is always valid */
		if (!__is_valid_data_blkaddr(new_addr)) {
			if (new_addr == NULL_ADDR)
				dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
			f2fs_update_data_blkaddr(&dn, new_addr);
		} else {
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
				new_addr, ni.version, true, true);
		}
	} else {
		blkcnt_t count = 1;

		err = inc_valid_block_count(sbi, inode, &count, true);
		if (err) {
			f2fs_put_dnode(&dn);
			return err;
		}

		*old_addr = dn.data_blkaddr;
		f2fs_truncate_data_blocks_range(&dn, 1);
		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);

		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
					ni.version, true, false);
	}

	f2fs_put_dnode(&dn);

	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
			index, old_addr ? *old_addr : 0, new_addr, recover);
	return 0;
}

static void __complete_revoke_list(struct inode *inode, struct list_head *head,
					bool revoke)
{
	struct revoke_entry *cur, *tmp;
	pgoff_t start_index = 0;
	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);

	list_for_each_entry_safe(cur, tmp, head, list) {
		if (revoke) {
			__replace_atomic_write_block(inode, cur->index,
						cur->old_addr, NULL, true);
		} else if (truncate) {
			f2fs_truncate_hole(inode, start_index, cur->index);
			start_index = cur->index + 1;
		}

		list_del(&cur->list);
		kmem_cache_free(revoke_entry_slab, cur);
	}

	if (!revoke && truncate)
		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
}

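/*
 * Walk every data block of the COW inode and swap it into the original
 * inode, recording each replaced address in @revoke_list. On failure the
 * list is replayed to restore the old addresses, so the commit is
 * all-or-nothing.
 */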
static int __f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inode *cow_inode = fi->cow_inode;
	struct revoke_entry *new;
	struct list_head revoke_list;
	block_t blkaddr;
	struct dnode_of_data dn;
	pgoff_t len = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t off = 0, blen, index;
	int ret = 0, i;

	INIT_LIST_HEAD(&revoke_list);

	while (len) {
		blen = min_t(pgoff_t, ADDRS_PER_BLOCK(cow_inode), len);

		set_new_dnode(&dn, cow_inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			if (dn.max_level == 0)
				goto out;
			goto next;
		}

		blen = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, cow_inode),
				len);
		index = off;
		for (i = 0; i < blen; i++, dn.ofs_in_node++, index++) {
			blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr)) {
				continue;
			} else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				ret = -EFSCORRUPTED;
				goto out;
			}

			new = f2fs_kmem_cache_alloc(revoke_entry_slab, GFP_NOFS,
							true, NULL);

			ret = __replace_atomic_write_block(inode, index, blkaddr,
							&new->old_addr, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				kmem_cache_free(revoke_entry_slab, new);
				goto out;
			}

			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			new->index = index;
			list_add_tail(&new->list, &revoke_list);
		}
		f2fs_put_dnode(&dn);
next:
		off += blen;
		len -= blen;
	}

out:
	if (ret) {
		sbi->revoked_atomic_block += fi->atomic_write_cnt;
	} else {
		sbi->committed_atomic_block += fi->atomic_write_cnt;
		set_inode_flag(inode, FI_ATOMIC_COMMITTED);
	}

	__complete_revoke_list(inode, &revoke_list, ret ? true : false);

	return ret;
}

int f2fs_commit_atomic_write(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (err)
		return err;

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
	f2fs_lock_op(sbi);

	err = __f2fs_commit_atomic_write(inode);

	f2fs_unlock_op(sbi);
	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (f2fs_cp_error(sbi))
		return;

	if (time_to_inject(sbi, FAULT_CHECKPOINT))
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);

	/* balance_fs_bg may have been left pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * If there are too many dirty dir/node pages without enough free
	 * segments, we should do GC, possibly ending in a checkpoint.
	 */
	if (has_enough_free_secs(sbi, 0, 0))
		return;

	if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
				sbi->gc_thread->f2fs_gc_task) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		wake_up(&sbi->gc_thread->gc_wait_queue_head);
		io_schedule();
		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
	} else {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = BG_GC,
			.no_bg_gc = true,
			.should_migrate_blocks = false,
			.err_gc_skipped = false,
			.nr_free_secs = 1 };
		f2fs_down_write(&sbi->gc_lock);
		stat_inc_gc_call_count(sbi, FOREGROUND);
		f2fs_gc(sbi, &gc_control);
	}
}

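/*
 * A single dirty-page class (dents, quota data, nodes, meta or imeta)
 * reaching @threshold blocks, or all classes together exceeding 1.5x that
 * amount, counts as excessive. The threshold is raised by 3/2 while a
 * checkpoint holds cp_rwsem.
 */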
static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{
	int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
	unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
	unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
	unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
	unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int threshold =
		SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD));
	unsigned int global_threshold = threshold * 3 / 2;

	if (dents >= threshold || qdata >= threshold ||
		nodes >= threshold || meta >= threshold ||
		imeta >= threshold)
		return true;
	return dents + qdata + nodes + meta + imeta > global_threshold;
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink the read extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
		f2fs_shrink_read_extent_tree(sbi,
				READ_EXTENT_CACHE_SHRINK_NUMBER);

	/* try to shrink the age extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
		f2fs_shrink_age_extent_tree(sbi,
				AGE_EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
		excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
		goto do_sync;

	/* there is in-flight background IO, or a foreground operation ran recently */
	if (is_inflight_io(sbi, REQ_TIME) ||
		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
		return;

	/* the periodic checkpoint timeout has been exceeded */
	if (f2fs_time_over(sbi, CP_TIME))
		goto do_sync;

	/* a checkpoint is the only way to shrink the cached NAT and ino entries */
	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
		f2fs_available_free_memory(sbi, INO_ENTRIES))
		return;

do_sync:
	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
		struct blk_plug plug;

		mutex_lock(&sbi->flush_lock);

		blk_start_plug(&plug);
		f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
		blk_finish_plug(&plug);

		mutex_unlock(&sbi->flush_lock);
	}
	stat_inc_cp_call_count(sbi, BACKGROUND);
	f2fs_sync_fs(sbi->sb, 1);
}


static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	int ret = blkdev_issue_flush(bdev);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	if (!ret)
		f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

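/*
 * With FLUSH_MERGE enabled, concurrent callers queue a flush_cmd and share
 * one device flush issued by the flush thread (or by a fallback issuer when
 * the thread is gone), so many fsyncs can complete with a single cache
 * flush. Without it, every caller issues and waits for its own flush.
 */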
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
	    f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/*
	 * update issue_list before we wake up issue_flush thread, this
	 * smp_mb() pairs with another barrier in ___wait_event(), see
	 * more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return 0;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return 0;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		int err = PTR_ERR(fcc->f2fs_issue_flush);

		fcc->f2fs_issue_flush = NULL;
		return err;
	}

	return 0;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		int count = DEFAULT_RETRY_IO_COUNT;

		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;

		do {
			ret = __submit_flush_wait(sbi, FDEV(i).bdev);
			if (ret)
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
		} while (ret && --count);

		if (ret) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FLUSH_FAIL);
			break;
		}

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

/*
 * This must not fail with an error such as -ENOMEM:
 * adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty segments to prefree; it takes seglist_lock itself. */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

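/*
 * Sum up the holes (invalid, not yet reclaimable blocks) in dirty data and
 * node segments separately; whatever the overprovision area cannot absorb
 * is reported as unusable while checkpointing is disabled.
 */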
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs);
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = max(holes[DATA], holes[NODE]);
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		return 0;
	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	if (has_not_enough_free_secs(sbi, 0, 0))
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno, false))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->di.lstart = lstart;
	dc->di.start = start;
	dc->di.len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *cur = rb_first_cached(&dcc->root), *next;
	struct discard_cmd *cur_dc, *next_dc;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
		next_dc = rb_entry(next, struct discard_cmd, rb_node);

		if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) {
			f2fs_info(sbi, "broken discard_rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_dc->di.lstart, cur_dc->di.len,
				next_dc->di.lstart, next_dc->di.len);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node *node = dcc->root.rb_root.rb_node;
	struct discard_cmd *dc;

	while (node) {
		dc = rb_entry(node, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			node = node->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			node = node->rb_right;
		else
			return dc;
	}
	return NULL;
}

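/*
 * Like __lookup_discard_cmd(), but also reports the neighbouring commands
 * and, when no command covers @blkaddr, the rb-tree insertion point, so
 * callers can merge a new range with its neighbours in one pass.
 */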
static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root,
				block_t blkaddr,
				struct discard_cmd **prev_entry,
				struct discard_cmd **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct discard_cmd *dc;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	while (*pnode) {
		parent = *pnode;
		dc = rb_entry(*pnode, struct discard_cmd, rb_node);

		if (blkaddr < dc->di.lstart)
			pnode = &(*pnode)->rb_left;
		else if (blkaddr >= dc->di.lstart + dc->di.len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	dc = rb_entry(parent, struct discard_cmd, rb_node);
	tmp_node = parent;
	if (parent && blkaddr > dc->di.lstart)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	tmp_node = parent;
	if (parent && blkaddr < dc->di.lstart)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return NULL;

lookup_neighbors:
	/* lookup prev node for merging backward later */
	tmp_node = rb_prev(&dc->rb_node);
	*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);

	/* lookup next node for merging frontward later */
	tmp_node = rb_next(&dc->rb_node);
	*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->di.len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		printk_ratelimited(
			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
			KERN_INFO, sbi->sb->s_id,
			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = BLKS_PER_SEG(sbi);
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = dcc->max_discard_request;
	dpolicy->io_aware_gran = dcc->discard_io_aware_gran;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		if (dcc->discard_io_aware == DPOLICY_IO_AWARE_ENABLE)
			dpolicy->io_aware = true;
		else if (dcc->discard_io_aware == DPOLICY_IO_AWARE_DISABLE)
			dpolicy->io_aware = false;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > dcc->discard_urgent_util) {
			dpolicy->granularity = MIN_DISCARD_GRANULARITY;
			if (atomic_read(&dcc->discard_cmd_cnt))
				dpolicy->max_interval =
					dcc->min_discard_issue_time;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = dcc->min_discard_issue_time;
		dpolicy->mid_interval = dcc->mid_discard_issue_time;
		dpolicy->max_interval = dcc->max_discard_issue_time;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = MIN_DISCARD_GRANULARITY;
		dpolicy->timeout = true;
	}
}

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);

#ifdef CONFIG_BLK_DEV_ZONED
static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
				   struct discard_cmd *dc, blk_opf_t flag,
				   struct list_head *wait_list,
				   unsigned int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct block_device *bdev = dc->bdev;
	struct bio *bio = bio_alloc(bdev, 0, REQ_OP_ZONE_RESET | flag, GFP_NOFS);
	unsigned long flags;

	trace_f2fs_issue_reset_zone(bdev, dc->di.start);

	spin_lock_irqsave(&dc->lock, flags);
	dc->state = D_SUBMIT;
	dc->bio_ref++;
	spin_unlock_irqrestore(&dc->lock, flags);

	if (issued)
		(*issued)++;

	atomic_inc(&dcc->queued_discard);
	dc->queued++;
	list_move_tail(&dc->list, wait_list);

	/* sanity check on discard range */
	__check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);

	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(dc->di.start);
	bio->bi_private = dc;
	bio->bi_end_io = f2fs_submit_discard_endio;
	submit_bio(bio);

	atomic_inc(&dcc->issued_discard);
	f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
}
#endif

/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				struct discard_cmd *dc, int *issued)
{
	struct block_device *bdev = dc->bdev;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	blk_opf_t flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
		int devi = f2fs_bdev_index(sbi, bdev);

		if (devi < 0)
			return -EINVAL;

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			__submit_zone_reset_cmd(sbi, dc, flag,
						wait_list, issued);
			return 0;
		}
	}
#endif

	trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len);

	lstart = dc->di.lstart;
	start = dc->di.start;
	len = dc->di.len;
	total_len = len;

	dc->di.len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->di.len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			err = -EIO;
		} else {
			err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, &bio);
		}
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * the state must be set before submission, to avoid the
		 * endio marking the command D_DONE right away
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len) {
		dcc->undiscard_blks -= len;
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	}
	return err;
}

static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc;
	bool leftmost = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		dc = rb_entry(parent, struct discard_cmd, rb_node);

		if (lstart < dc->di.lstart) {
			p = &(*p)->rb_left;
		} else if (lstart >= dc->di.lstart + dc->di.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Let's skip to add, if exists */
			return;
		}
	}

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]);
}

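/*
 * Remove @blkaddr from a pending discard command: the command is trimmed
 * at the front, at the back, or split into two commands when the punched
 * block sits in the middle of its range.
 */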
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->di.len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->di.len = blkaddr - dc->di.lstart;
		dcc->undiscard_blks += dc->di.len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr);
		} else {
			dc->di.lstart++;
			dc->di.len--;
			dc->di.start++;
			dcc->undiscard_blks += dc->di.len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}

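/*
 * Insert the range [lstart, lstart + len) into the discard rb-tree,
 * skipping the pieces already covered by pending commands and merging each
 * new piece with an adjacent D_PREP command on the same device when the
 * result stays within max_discard_blocks.
 */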
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev));
	block_t end = lstart + len;

	dc = __lookup_discard_cmd_ret(&dcc->root, lstart,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->di.lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->di.lstart + prev_dc->di.len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->di.lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->di.lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged)
			__insert_discard_cmd(sbi, bdev,
						di.lstart, di.start, di.len);
 next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}

#ifdef CONFIG_BLK_DEV_ZONED
static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t lblkstart,
		block_t blklen)
{
	trace_f2fs_queue_reset_zone(bdev, blkstart);

	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}
#endif

static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
}

static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
		struct discard_policy *dpolicy, int *issued)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos,
				&prev_dc, &next_dc, &insert_p, &insert_parent);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->di.lstart + dc->di.len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, issued);

		if (*issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!(*issued) && io_interrupted)
		*issued = -1;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);

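/*
 * Issue pending discard commands, largest length class first, stopping at
 * the policy's granularity cutoff, the request budget, or an io_aware
 * interruption. Returns the number issued, or -1 when interrupted before
 * anything was issued.
 */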
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i + 1 < dcc->max_ordered_discard && dpolicy->ordered) {
			__issue_discard_cmd_orderly(sbi, dpolicy, &issued);
			return issued;
		}

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->di.len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc = NULL, *iter, *tmp;
	unsigned int trimmed = 0;

next:
	dc = NULL;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(iter, tmp, wait_list, list) {
		if (iter->di.lstart + iter->di.len <= start ||
					end <= iter->di.lstart)
			continue;
		if (iter->di.len < dpolicy->granularity)
			continue;
		if (iter->state == D_DONE && !iter->ref) {
			wait_for_completion_io(&iter->wait);
			if (!iter->error)
				trimmed += iter->di.len;
			__remove_discard_cmd(sbi, iter);
		} else {
			iter->ref++;
			dc = iter;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (dc) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = __lookup_discard_cmd(sbi, blkaddr);
#ifdef CONFIG_BLK_DEV_ZONED
	if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
		int devi = f2fs_bdev_index(sbi, dc->bdev);

		if (devi < 0) {
			mutex_unlock(&dcc->cmd_lock);
			return;
		}

		if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
			/* force submit zone reset */
			if (dc->state == D_PREP)
				__submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
							&dcc->wait_list, NULL);
			dc->ref++;
			mutex_unlock(&dcc->cmd_lock);
			/* wait zone reset */
			__wait_one_discard_bio(sbi, dc);
			return;
		}
	}
#endif
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/**
 * f2fs_issue_discard_timeout() - Issue all discard commands within UMOUNT_DISCARD_TIMEOUT
 * @sbi: the f2fs_sb_info data for discard cmd to issue
 *
 * When UMOUNT_DISCARD_TIMEOUT is exceeded, all remaining discard commands
 * will be dropped.
 *
 * Return: true if all discard commands were issued or none needed to be
 * issued, false otherwise.
 */
1864bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1865{
1866	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1867	struct discard_policy dpolicy;
1868	bool dropped;
1869
1870	if (!atomic_read(&dcc->discard_cmd_cnt))
1871		return true;
1872
1873	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1874					dcc->discard_granularity);
1875	__issue_discard_cmd(sbi, &dpolicy);
1876	dropped = __drop_discard_cmd(sbi);
1877
1878	/* just to make sure there is no pending discard commands */
1879	__wait_all_discard_cmd(sbi, NULL);
1880
1881	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1882	return !dropped;
1883}
1884
1885static int issue_discard_thread(void *data)
1886{
1887	struct f2fs_sb_info *sbi = data;
1888	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1889	wait_queue_head_t *q = &dcc->discard_wait_queue;
1890	struct discard_policy dpolicy;
1891	unsigned int wait_ms = dcc->min_discard_issue_time;
1892	int issued;
1893
1894	set_freezable();
1895
1896	do {
1897		wait_event_freezable_timeout(*q,
1898				kthread_should_stop() || dcc->discard_wake,
1899				msecs_to_jiffies(wait_ms));
1900
1901		if (sbi->gc_mode == GC_URGENT_HIGH ||
1902			!f2fs_available_free_memory(sbi, DISCARD_CACHE))
1903			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1904						MIN_DISCARD_GRANULARITY);
1905		else
1906			__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1907						dcc->discard_granularity);
1908
1909		if (dcc->discard_wake)
1910			dcc->discard_wake = false;
1911
1912		/* clean up pending candidates before going to sleep */
1913		if (atomic_read(&dcc->queued_discard))
1914			__wait_all_discard_cmd(sbi, NULL);
1915
1916		if (f2fs_readonly(sbi->sb))
1917			continue;
1918		if (kthread_should_stop())
1919			return 0;
1920		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1921			!atomic_read(&dcc->discard_cmd_cnt)) {
1922			wait_ms = dpolicy.max_interval;
1923			continue;
1924		}
1925
1926		sb_start_intwrite(sbi->sb);
1927
1928		issued = __issue_discard_cmd(sbi, &dpolicy);
1929		if (issued > 0) {
1930			__wait_all_discard_cmd(sbi, &dpolicy);
1931			wait_ms = dpolicy.min_interval;
1932		} else if (issued == -1) {
1933			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1934			if (!wait_ms)
1935				wait_ms = dpolicy.mid_interval;
1936		} else {
1937			wait_ms = dpolicy.max_interval;
1938		}
1939		if (!atomic_read(&dcc->discard_cmd_cnt))
1940			wait_ms = dpolicy.max_interval;
1941
1942		sb_end_intwrite(sbi->sb);
1943
1944	} while (!kthread_should_stop());
1945	return 0;
1946}
1947
1948#ifdef CONFIG_BLK_DEV_ZONED
1949static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1950		struct block_device *bdev, block_t blkstart, block_t blklen)
1951{
1952	sector_t sector, nr_sects;
1953	block_t lblkstart = blkstart;
1954	int devi = 0;
1955	u64 remainder = 0;
1956
1957	if (f2fs_is_multi_device(sbi)) {
1958		devi = f2fs_target_device_index(sbi, blkstart);
1959		if (blkstart < FDEV(devi).start_blk ||
1960		    blkstart > FDEV(devi).end_blk) {
1961			f2fs_err(sbi, "Invalid block %x", blkstart);
1962			return -EIO;
1963		}
1964		blkstart -= FDEV(devi).start_blk;
1965	}
1966
1967	/* For sequential zones, reset the zone write pointer */
1968	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1969		sector = SECTOR_FROM_BLOCK(blkstart);
1970		nr_sects = SECTOR_FROM_BLOCK(blklen);
1971		div64_u64_rem(sector, bdev_zone_sectors(bdev), &remainder);
1972
1973		if (remainder || nr_sects != bdev_zone_sectors(bdev)) {
1974			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1975				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1976				 blkstart, blklen);
1977			return -EIO;
1978		}
1979
1980		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
1981			unsigned int nofs_flags;
1982			int ret;
1983
1984			trace_f2fs_issue_reset_zone(bdev, blkstart);
1985			nofs_flags = memalloc_nofs_save();
1986			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1987						sector, nr_sects);
1988			memalloc_nofs_restore(nofs_flags);
1989			return ret;
1990		}
1991
1992		__queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
1993		return 0;
1994	}
1995
1996	/* For conventional zones, use regular discard if supported */
1997	__queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1998	return 0;
1999}
2000#endif
2001
2002static int __issue_discard_async(struct f2fs_sb_info *sbi,
2003		struct block_device *bdev, block_t blkstart, block_t blklen)
2004{
2005#ifdef CONFIG_BLK_DEV_ZONED
2006	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
2007		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
2008#endif
2009	__queue_discard_cmd(sbi, bdev, blkstart, blklen);
2010	return 0;
2011}
2012
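/*
 * Note (added annotation): f2fs_issue_discard() walks
 * [blkstart, blkstart + blklen) and splits the range wherever it crosses
 * a device boundary, so each async discard stays within one block device.
 * Hypothetical example: with devices covering blocks 0..999 and
 * 1000..1999, a request for 990..1009 becomes one discard for 990..999
 * and one for 1000..1009. It also marks the blocks in each segment's
 * discard_map and decrements sbi->discard_blks accordingly.
 */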
2013static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2014				block_t blkstart, block_t blklen)
2015{
2016	sector_t start = blkstart, len = 0;
2017	struct block_device *bdev;
2018	struct seg_entry *se;
2019	unsigned int offset;
2020	block_t i;
2021	int err = 0;
2022
2023	bdev = f2fs_target_device(sbi, blkstart, NULL);
2024
2025	for (i = blkstart; i < blkstart + blklen; i++, len++) {
2026		if (i != start) {
2027			struct block_device *bdev2 =
2028				f2fs_target_device(sbi, i, NULL);
2029
2030			if (bdev2 != bdev) {
2031				err = __issue_discard_async(sbi, bdev,
2032						start, len);
2033				if (err)
2034					return err;
2035				bdev = bdev2;
2036				start = i;
2037				len = 0;
2038			}
2039		}
2040
2041		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2042		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2043
2044		if (f2fs_block_unit_discard(sbi) &&
2045				!f2fs_test_and_set_bit(offset, se->discard_map))
2046			sbi->discard_blks--;
2047	}
2048
2049	if (len)
2050		err = __issue_discard_async(sbi, bdev, start, len);
2051	return err;
2052}
2053
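/*
 * Note (added annotation): the dmap built below selects discard
 * candidates. In force (fstrim) mode a block qualifies if it is invalid
 * in the checkpointed bitmap and not already discarded: ~ckpt & ~discard.
 * Otherwise only blocks that were valid at the last checkpoint and have
 * been freed since qualify: (cur ^ ckpt) & ckpt. Hypothetical 4-bit
 * example: cur=0011b, ckpt=0110b gives (cur ^ ckpt) & ckpt = 0100b,
 * i.e. just the block freed after the checkpoint.
 */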
2054static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2055							bool check_only)
2056{
2057	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2058	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2059	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2060	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2061	unsigned long *discard_map = (unsigned long *)se->discard_map;
2062	unsigned long *dmap = SIT_I(sbi)->tmp_map;
2063	unsigned int start = 0, end = -1;
2064	bool force = (cpc->reason & CP_DISCARD);
2065	struct discard_entry *de = NULL;
2066	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2067	int i;
2068
2069	if (se->valid_blocks == BLKS_PER_SEG(sbi) ||
2070	    !f2fs_hw_support_discard(sbi) ||
2071	    !f2fs_block_unit_discard(sbi))
2072		return false;
2073
2074	if (!force) {
2075		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
2076			SM_I(sbi)->dcc_info->nr_discards >=
2077				SM_I(sbi)->dcc_info->max_discards)
2078			return false;
2079	}
2080
2081	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
2082	for (i = 0; i < entries; i++)
2083		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
2084				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
2085
2086	while (force || SM_I(sbi)->dcc_info->nr_discards <=
2087				SM_I(sbi)->dcc_info->max_discards) {
2088		start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1);
2089		if (start >= BLKS_PER_SEG(sbi))
2090			break;
2091
2092		end = __find_rev_next_zero_bit(dmap,
2093						BLKS_PER_SEG(sbi), start + 1);
2094		if (force && start && end != BLKS_PER_SEG(sbi) &&
2095		    (end - start) < cpc->trim_minlen)
2096			continue;
2097
2098		if (check_only)
2099			return true;
2100
2101		if (!de) {
2102			de = f2fs_kmem_cache_alloc(discard_entry_slab,
2103						GFP_F2FS_ZERO, true, NULL);
2104			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2105			list_add_tail(&de->list, head);
2106		}
2107
2108		for (i = start; i < end; i++)
2109			__set_bit_le(i, (void *)de->discard_map);
2110
2111		SM_I(sbi)->dcc_info->nr_discards += end - start;
2112	}
2113	return false;
2114}
2115
2116static void release_discard_addr(struct discard_entry *entry)
2117{
2118	list_del(&entry->list);
2119	kmem_cache_free(discard_entry_slab, entry);
2120}
2121
2122void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2123{
2124	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2125	struct discard_entry *entry, *this;
2126
2127	/* drop caches */
2128	list_for_each_entry_safe(entry, this, head, list)
2129		release_discard_addr(entry);
2130}
2131
2132/*
2133 * Should call f2fs_clear_prefree_segments after checkpoint is done.
2134 */
2135static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2136{
2137	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2138	unsigned int segno;
2139
2140	mutex_lock(&dirty_i->seglist_lock);
2141	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2142		__set_test_and_free(sbi, segno, false);
2143	mutex_unlock(&dirty_i->seglist_lock);
2144}
2145
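/*
 * Note (added annotation): prefree segments hold data freed before the
 * last checkpoint; only once the checkpoint has committed can they safely
 * be returned to the free list and discarded. This function does both: it
 * clears the prefree bits and, when realtime discard is enabled, issues
 * discards for whole segments/sections plus the small per-block ranges
 * collected by add_discard_addrs().
 */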
2146void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2147						struct cp_control *cpc)
2148{
2149	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2150	struct list_head *head = &dcc->entry_list;
2151	struct discard_entry *entry, *this;
2152	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2153	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2154	unsigned int start = 0, end = -1;
2155	unsigned int secno, start_segno;
2156	bool force = (cpc->reason & CP_DISCARD);
2157	bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2158						DISCARD_UNIT_SECTION;
2159
2160	if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2161		section_alignment = true;
2162
2163	mutex_lock(&dirty_i->seglist_lock);
2164
2165	while (1) {
2166		int i;
2167
2168		if (section_alignment && end != -1)
2169			end--;
2170		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2171		if (start >= MAIN_SEGS(sbi))
2172			break;
2173		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2174								start + 1);
2175
2176		if (section_alignment) {
2177			start = rounddown(start, SEGS_PER_SEC(sbi));
2178			end = roundup(end, SEGS_PER_SEC(sbi));
2179		}
2180
2181		for (i = start; i < end; i++) {
2182			if (test_and_clear_bit(i, prefree_map))
2183				dirty_i->nr_dirty[PRE]--;
2184		}
2185
2186		if (!f2fs_realtime_discard_enable(sbi))
2187			continue;
2188
2189		if (force && start >= cpc->trim_start &&
2190					(end - 1) <= cpc->trim_end)
2191			continue;
2192
2193		/* Should cover 2MB zoned device for zone-based reset */
2194		if (!f2fs_sb_has_blkzoned(sbi) &&
2195		    (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2196			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2197				SEGS_TO_BLKS(sbi, end - start));
2198			continue;
2199		}
2200next:
2201		secno = GET_SEC_FROM_SEG(sbi, start);
2202		start_segno = GET_SEG_FROM_SEC(sbi, secno);
2203		if (!IS_CURSEC(sbi, secno) &&
2204			!get_valid_blocks(sbi, start, true))
2205			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2206						BLKS_PER_SEC(sbi));
2207
2208		start = start_segno + SEGS_PER_SEC(sbi);
2209		if (start < end)
2210			goto next;
2211		else
2212			end = start - 1;
2213	}
2214	mutex_unlock(&dirty_i->seglist_lock);
2215
2216	if (!f2fs_block_unit_discard(sbi))
2217		goto wakeup;
2218
2219	/* send small discards */
2220	list_for_each_entry_safe(entry, this, head, list) {
2221		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2222		bool is_valid = test_bit_le(0, entry->discard_map);
2223
2224find_next:
2225		if (is_valid) {
2226			next_pos = find_next_zero_bit_le(entry->discard_map,
2227						BLKS_PER_SEG(sbi), cur_pos);
2228			len = next_pos - cur_pos;
2229
2230			if (f2fs_sb_has_blkzoned(sbi) ||
2231			    (force && len < cpc->trim_minlen))
2232				goto skip;
2233
2234			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2235									len);
2236			total_len += len;
2237		} else {
2238			next_pos = find_next_bit_le(entry->discard_map,
2239						BLKS_PER_SEG(sbi), cur_pos);
2240		}
2241skip:
2242		cur_pos = next_pos;
2243		is_valid = !is_valid;
2244
2245		if (cur_pos < BLKS_PER_SEG(sbi))
2246			goto find_next;
2247
2248		release_discard_addr(entry);
2249		dcc->nr_discards -= total_len;
2250	}
2251
2252wakeup:
2253	wake_up_discard_thread(sbi, false);
2254}
2255
2256int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2257{
2258	dev_t dev = sbi->sb->s_bdev->bd_dev;
2259	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2260	int err = 0;
2261
2262	if (f2fs_sb_has_readonly(sbi)) {
2263		f2fs_info(sbi,
2264			"Skip to start discard thread for readonly image");
2265		return 0;
2266	}
2267
2268	if (!f2fs_realtime_discard_enable(sbi))
2269		return 0;
2270
2271	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2272				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2273	if (IS_ERR(dcc->f2fs_issue_discard)) {
2274		err = PTR_ERR(dcc->f2fs_issue_discard);
2275		dcc->f2fs_issue_discard = NULL;
2276	}
2277
2278	return err;
2279}
2280
2281static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2282{
2283	struct discard_cmd_control *dcc;
2284	int err = 0, i;
2285
2286	if (SM_I(sbi)->dcc_info) {
2287		dcc = SM_I(sbi)->dcc_info;
2288		goto init_thread;
2289	}
2290
2291	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2292	if (!dcc)
2293		return -ENOMEM;
2294
2295	dcc->discard_io_aware_gran = MAX_PLIST_NUM;
2296	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2297	dcc->max_ordered_discard = DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY;
2298	dcc->discard_io_aware = DPOLICY_IO_AWARE_ENABLE;
2299	if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2300		dcc->discard_granularity = BLKS_PER_SEG(sbi);
2301	else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2302		dcc->discard_granularity = BLKS_PER_SEC(sbi);
2303
2304	INIT_LIST_HEAD(&dcc->entry_list);
2305	for (i = 0; i < MAX_PLIST_NUM; i++)
2306		INIT_LIST_HEAD(&dcc->pend_list[i]);
2307	INIT_LIST_HEAD(&dcc->wait_list);
2308	INIT_LIST_HEAD(&dcc->fstrim_list);
2309	mutex_init(&dcc->cmd_lock);
2310	atomic_set(&dcc->issued_discard, 0);
2311	atomic_set(&dcc->queued_discard, 0);
2312	atomic_set(&dcc->discard_cmd_cnt, 0);
2313	dcc->nr_discards = 0;
2314	dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi));
2315	dcc->max_discard_request = DEF_MAX_DISCARD_REQUEST;
2316	dcc->min_discard_issue_time = DEF_MIN_DISCARD_ISSUE_TIME;
2317	dcc->mid_discard_issue_time = DEF_MID_DISCARD_ISSUE_TIME;
2318	dcc->max_discard_issue_time = DEF_MAX_DISCARD_ISSUE_TIME;
2319	dcc->discard_urgent_util = DEF_DISCARD_URGENT_UTIL;
2320	dcc->undiscard_blks = 0;
2321	dcc->next_pos = 0;
2322	dcc->root = RB_ROOT_CACHED;
2323	dcc->rbtree_check = false;
2324
2325	init_waitqueue_head(&dcc->discard_wait_queue);
2326	SM_I(sbi)->dcc_info = dcc;
2327init_thread:
2328	err = f2fs_start_discard_thread(sbi);
2329	if (err) {
2330		kfree(dcc);
2331		SM_I(sbi)->dcc_info = NULL;
2332	}
2333
2334	return err;
2335}
2336
2337static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2338{
2339	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2340
2341	if (!dcc)
2342		return;
2343
2344	f2fs_stop_discard_thread(sbi);
2345
2346	/*
2347	 * Recovery can cache discard commands, so the error path of
2348	 * fill_super() needs a chance to handle them here.
2349	 */
2350	f2fs_issue_discard_timeout(sbi);
2351
2352	kfree(dcc);
2353	SM_I(sbi)->dcc_info = NULL;
2354}
2355
2356static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2357{
2358	struct sit_info *sit_i = SIT_I(sbi);
2359
2360	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2361		sit_i->dirty_sentries++;
2362		return false;
2363	}
2364
2365	return true;
2366}
2367
2368static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2369					unsigned int segno, int modified)
2370{
2371	struct seg_entry *se = get_seg_entry(sbi, segno);
2372
2373	se->type = type;
2374	if (modified)
2375		__mark_sit_entry_dirty(sbi, segno);
2376}
2377
2378static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2379								block_t blkaddr)
2380{
2381	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2382
2383	if (segno == NULL_SEGNO)
2384		return 0;
2385	return get_seg_entry(sbi, segno)->mtime;
2386}
2387
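/*
 * Note (added annotation): segment mtime is kept as a running average
 * over the segment's valid blocks, so a single write cannot skew the age
 * that GC policies see:
 *
 *	mtime' = (mtime * valid_blocks + new_mtime) / (valid_blocks + 1)
 *
 * which is what the div_u64() below computes.
 */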
2388static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2389						unsigned long long old_mtime)
2390{
2391	struct seg_entry *se;
2392	unsigned int segno = GET_SEGNO(sbi, blkaddr);
2393	unsigned long long ctime = get_mtime(sbi, false);
2394	unsigned long long mtime = old_mtime ? old_mtime : ctime;
2395
2396	if (segno == NULL_SEGNO)
2397		return;
2398
2399	se = get_seg_entry(sbi, segno);
2400
2401	if (!se->mtime)
2402		se->mtime = mtime;
2403	else
2404		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2405						se->valid_blocks + 1);
2406
2407	if (ctime > SIT_I(sbi)->max_mtime)
2408		SIT_I(sbi)->max_mtime = ctime;
2409}
2410
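/*
 * Note (added annotation): update_sit_entry() adjusts one block's
 * validity: del is +1 when a block is allocated and -1 when it is
 * invalidated. Under CONFIG_F2FS_CHECK_FS every bitmap update is mirrored
 * in cur_valid_map_mir and cross-checked, so silent bitmap corruption
 * trips an f2fs_bug_on() instead of propagating.
 */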
2411static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2412{
2413	struct seg_entry *se;
2414	unsigned int segno, offset;
2415	long int new_vblocks;
2416	bool exist;
2417#ifdef CONFIG_F2FS_CHECK_FS
2418	bool mir_exist;
2419#endif
2420
2421	segno = GET_SEGNO(sbi, blkaddr);
2422	if (segno == NULL_SEGNO)
2423		return;
2424
2425	se = get_seg_entry(sbi, segno);
2426	new_vblocks = se->valid_blocks + del;
2427	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2428
2429	f2fs_bug_on(sbi, (new_vblocks < 0 ||
2430			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2431
2432	se->valid_blocks = new_vblocks;
2433
2434	/* Update valid block bitmap */
2435	if (del > 0) {
2436		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2437#ifdef CONFIG_F2FS_CHECK_FS
2438		mir_exist = f2fs_test_and_set_bit(offset,
2439						se->cur_valid_map_mir);
2440		if (unlikely(exist != mir_exist)) {
2441			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2442				 blkaddr, exist);
2443			f2fs_bug_on(sbi, 1);
2444		}
2445#endif
2446		if (unlikely(exist)) {
2447			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2448				 blkaddr);
2449			f2fs_bug_on(sbi, 1);
2450			se->valid_blocks--;
2451			del = 0;
2452		}
2453
2454		if (f2fs_block_unit_discard(sbi) &&
2455				!f2fs_test_and_set_bit(offset, se->discard_map))
2456			sbi->discard_blks--;
2457
2458		/*
2459		 * SSR should never reuse a block which is checkpointed
2460		 * or newly invalidated.
2461		 */
2462		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2463			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2464				se->ckpt_valid_blocks++;
2465		}
2466	} else {
2467		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2468#ifdef CONFIG_F2FS_CHECK_FS
2469		mir_exist = f2fs_test_and_clear_bit(offset,
2470						se->cur_valid_map_mir);
2471		if (unlikely(exist != mir_exist)) {
2472			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2473				 blkaddr, exist);
2474			f2fs_bug_on(sbi, 1);
2475		}
2476#endif
2477		if (unlikely(!exist)) {
2478			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2479				 blkaddr);
2480			f2fs_bug_on(sbi, 1);
2481			se->valid_blocks++;
2482			del = 0;
2483		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2484			/*
2485			 * If checkpoints are off, we must not reuse data that
2486			 * was used in the previous checkpoint. If it was used
2487			 * before, we must track that to know how much space we
2488			 * really have.
2489			 */
2490			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2491				spin_lock(&sbi->stat_lock);
2492				sbi->unusable_block_count++;
2493				spin_unlock(&sbi->stat_lock);
2494			}
2495		}
2496
2497		if (f2fs_block_unit_discard(sbi) &&
2498			f2fs_test_and_clear_bit(offset, se->discard_map))
2499			sbi->discard_blks++;
2500	}
2501	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2502		se->ckpt_valid_blocks += del;
2503
2504	__mark_sit_entry_dirty(sbi, segno);
2505
2506	/* update total number of valid blocks to be written in ckpt area */
2507	SIT_I(sbi)->written_valid_blocks += del;
2508
2509	if (__is_large_section(sbi))
2510		get_sec_entry(sbi, segno)->valid_blocks += del;
2511}
2512
2513void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2514{
2515	unsigned int segno = GET_SEGNO(sbi, addr);
2516	struct sit_info *sit_i = SIT_I(sbi);
2517
2518	f2fs_bug_on(sbi, addr == NULL_ADDR);
2519	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2520		return;
2521
2522	f2fs_invalidate_internal_cache(sbi, addr);
2523
2524	/* add it into the SIT main buffer */
2525	down_write(&sit_i->sentry_lock);
2526
2527	update_segment_mtime(sbi, addr, 0);
2528	update_sit_entry(sbi, addr, -1);
2529
2530	/* add it into dirty seglist */
2531	locate_dirty_segment(sbi, segno);
2532
2533	up_write(&sit_i->sentry_lock);
2534}
2535
2536bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2537{
2538	struct sit_info *sit_i = SIT_I(sbi);
2539	unsigned int segno, offset;
2540	struct seg_entry *se;
2541	bool is_cp = false;
2542
2543	if (!__is_valid_data_blkaddr(blkaddr))
2544		return true;
2545
2546	down_read(&sit_i->sentry_lock);
2547
2548	segno = GET_SEGNO(sbi, blkaddr);
2549	se = get_seg_entry(sbi, segno);
2550	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2551
2552	if (f2fs_test_bit(offset, se->ckpt_valid_map))
2553		is_cp = true;
2554
2555	up_read(&sit_i->sentry_lock);
2556
2557	return is_cp;
2558}
2559
2560static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2561{
2562	struct curseg_info *curseg = CURSEG_I(sbi, type);
2563
2564	if (sbi->ckpt->alloc_type[type] == SSR)
2565		return BLKS_PER_SEG(sbi);
2566	return curseg->next_blkoff;
2567}
2568
2569/*
2570 * Calculate the number of current summary pages for writing
2571 */
2572int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2573{
2574	int valid_sum_count = 0;
2575	int i, sum_in_page;
2576
2577	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2578		if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2579			valid_sum_count +=
2580				le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2581		else
2582			valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2583	}
2584
2585	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2586			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2587	if (valid_sum_count <= sum_in_page)
2588		return 1;
2589	else if ((valid_sum_count - sum_in_page) <=
2590		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2591		return 2;
2592	return 3;
2593}
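
/*
 * Note (added annotation): a rough worked example for the function above,
 * assuming a 4 KiB page and the usual constants (SUMMARY_SIZE = 7,
 * SUM_FOOTER_SIZE = 5, SUM_JOURNAL_SIZE = 507): the first page holds
 * (4096 - 2 * 507 - 5) / 7 = 439 entries, so up to three pages are needed
 * for the worst case of 3 data logs * 512 blocks per segment = 1536.
 */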
2594
2595/*
2596 * Caller should put this summary page
2597 */
2598struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2599{
2600	if (unlikely(f2fs_cp_error(sbi)))
2601		return ERR_PTR(-EIO);
2602	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2603}
2604
2605void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2606					void *src, block_t blk_addr)
2607{
2608	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2609
2610	memcpy(page_address(page), src, PAGE_SIZE);
2611	set_page_dirty(page);
2612	f2fs_put_page(page, 1);
2613}
2614
2615static void write_sum_page(struct f2fs_sb_info *sbi,
2616			struct f2fs_summary_block *sum_blk, block_t blk_addr)
2617{
2618	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2619}
2620
2621static void write_current_sum_page(struct f2fs_sb_info *sbi,
2622						int type, block_t blk_addr)
2623{
2624	struct curseg_info *curseg = CURSEG_I(sbi, type);
2625	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2626	struct f2fs_summary_block *src = curseg->sum_blk;
2627	struct f2fs_summary_block *dst;
2628
2629	dst = (struct f2fs_summary_block *)page_address(page);
2630	memset(dst, 0, PAGE_SIZE);
2631
2632	mutex_lock(&curseg->curseg_mutex);
2633
2634	down_read(&curseg->journal_rwsem);
2635	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2636	up_read(&curseg->journal_rwsem);
2637
2638	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2639	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2640
2641	mutex_unlock(&curseg->curseg_mutex);
2642
2643	set_page_dirty(page);
2644	f2fs_put_page(page, 1);
2645}
2646
2647static int is_next_segment_free(struct f2fs_sb_info *sbi,
2648				struct curseg_info *curseg, int type)
2649{
2650	unsigned int segno = curseg->segno + 1;
2651	struct free_segmap_info *free_i = FREE_I(sbi);
2652
2653	if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi))
2654		return !test_bit(segno, free_i->free_segmap);
2655	return 0;
2656}
2657
2658/*
2659 * Find a new segment in the free segment bitmap, in the right order.
2660 * This function should return successfully; otherwise it is a BUG.
2661 */
2662static int get_new_segment(struct f2fs_sb_info *sbi,
2663			unsigned int *newseg, bool new_sec, bool pinning)
2664{
2665	struct free_segmap_info *free_i = FREE_I(sbi);
2666	unsigned int segno, secno, zoneno;
2667	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2668	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2669	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2670	bool init = true;
2671	int i;
2672	int ret = 0;
2673
2674	spin_lock(&free_i->segmap_lock);
2675
2676	if (time_to_inject(sbi, FAULT_NO_SEGMENT)) {
2677		ret = -ENOSPC;
2678		goto out_unlock;
2679	}
2680
2681	if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) {
2682		segno = find_next_zero_bit(free_i->free_segmap,
2683			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2684		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2685			goto got_it;
2686	}
2687
2688	/*
2689	 * If we format f2fs on zoned storage, let's try to get pinned sections
2690	 * from the beginning of the storage, which should be a conventional zone.
2691	 */
2692	if (f2fs_sb_has_blkzoned(sbi)) {
2693		segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
2694		hint = GET_SEC_FROM_SEG(sbi, segno);
2695	}
2696
2697find_other_zone:
2698	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2699	if (secno >= MAIN_SECS(sbi)) {
2700		secno = find_first_zero_bit(free_i->free_secmap,
2701							MAIN_SECS(sbi));
2702		if (secno >= MAIN_SECS(sbi)) {
2703			ret = -ENOSPC;
2704			goto out_unlock;
2705		}
2706	}
2707	segno = GET_SEG_FROM_SEC(sbi, secno);
2708	zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2709
2710	/* give up on finding another zone */
2711	if (!init)
2712		goto got_it;
2713	if (sbi->secs_per_zone == 1)
2714		goto got_it;
2715	if (zoneno == old_zoneno)
2716		goto got_it;
2717	for (i = 0; i < NR_CURSEG_TYPE; i++)
2718		if (CURSEG_I(sbi, i)->zone == zoneno)
2719			break;
2720
2721	if (i < NR_CURSEG_TYPE) {
2722		/* zone is in use, try another */
2723		if (zoneno + 1 >= total_zones)
2724			hint = 0;
2725		else
2726			hint = (zoneno + 1) * sbi->secs_per_zone;
2727		init = false;
2728		goto find_other_zone;
2729	}
2730got_it:
2731	/* set it as dirty segment in free segmap */
2732	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2733
2734	/* no free section in conventional zone */
2735	if (new_sec && pinning &&
2736		!f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) {
2737		ret = -EAGAIN;
2738		goto out_unlock;
2739	}
2740	__set_inuse(sbi, segno);
2741	*newseg = segno;
2742out_unlock:
2743	spin_unlock(&free_i->segmap_lock);
2744
2745	if (ret == -ENOSPC) {
2746		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT);
2747		f2fs_bug_on(sbi, 1);
2748	}
2749	return ret;
2750}
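
/*
 * Note (added annotation): the search order above is: next free segment
 * within the current section, then a free section starting from the hint,
 * retrying in a different zone if the chosen one already hosts an active
 * curseg. On zoned storage, pinned allocations restart from segment 0 so
 * they land in conventional zones.
 */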
2751
2752static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2753{
2754	struct curseg_info *curseg = CURSEG_I(sbi, type);
2755	struct summary_footer *sum_footer;
2756	unsigned short seg_type = curseg->seg_type;
2757
2758	/* only happens when get_new_segment() fails */
2759	if (curseg->next_segno == NULL_SEGNO)
2760		return;
2761
2762	curseg->inited = true;
2763	curseg->segno = curseg->next_segno;
2764	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2765	curseg->next_blkoff = 0;
2766	curseg->next_segno = NULL_SEGNO;
2767
2768	sum_footer = &(curseg->sum_blk->footer);
2769	memset(sum_footer, 0, sizeof(struct summary_footer));
2770
2771	sanity_check_seg_type(sbi, seg_type);
2772
2773	if (IS_DATASEG(seg_type))
2774		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2775	if (IS_NODESEG(seg_type))
2776		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2777	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2778}
2779
2780static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2781{
2782	struct curseg_info *curseg = CURSEG_I(sbi, type);
2783	unsigned short seg_type = curseg->seg_type;
2784
2785	sanity_check_seg_type(sbi, seg_type);
2786	if (f2fs_need_rand_seg(sbi))
2787		return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
2788
2789	if (__is_large_section(sbi))
2790		return curseg->segno;
2791
2792	/* the inmem log may not be located on any segment after mount */
2793	if (!curseg->inited)
2794		return 0;
2795
2796	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2797		return 0;
2798
2799	if (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type))
2800		return 0;
2801
2802	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2803		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2804
2805	/* find segments from 0 to reuse freed segments */
2806	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2807		return 0;
2808
2809	return curseg->segno;
2810}
2811
2812/*
2813 * Allocate a current working segment.
2814 * This function always allocates a free segment in LFS manner.
2815 */
2816static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2817{
2818	struct curseg_info *curseg = CURSEG_I(sbi, type);
2819	unsigned int segno = curseg->segno;
2820	bool pinning = type == CURSEG_COLD_DATA_PINNED;
2821	int ret;
2822
2823	if (curseg->inited)
2824		write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
2825
2826	segno = __get_next_segno(sbi, type);
2827	ret = get_new_segment(sbi, &segno, new_sec, pinning);
2828	if (ret) {
2829		if (ret == -ENOSPC)
2830			curseg->segno = NULL_SEGNO;
2831		return ret;
2832	}
2833
2834	curseg->next_segno = segno;
2835	reset_curseg(sbi, type, 1);
2836	curseg->alloc_type = LFS;
2837	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2838		curseg->fragment_remained_chunk =
2839				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2840	return 0;
2841}
2842
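/*
 * Note (added annotation): for SSR, a block offset is only reusable if it
 * is free in both the current and the checkpointed validity bitmaps;
 * ORing the two maps and searching for a zero bit therefore guarantees
 * that no block still referenced by the last checkpoint is overwritten.
 */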
2843static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2844					int segno, block_t start)
2845{
2846	struct seg_entry *se = get_seg_entry(sbi, segno);
2847	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2848	unsigned long *target_map = SIT_I(sbi)->tmp_map;
2849	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2850	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2851	int i;
2852
2853	for (i = 0; i < entries; i++)
2854		target_map[i] = ckpt_map[i] | cur_map[i];
2855
2856	return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start);
2857}
2858
2859static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2860		struct curseg_info *seg)
2861{
2862	return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2863}
2864
2865bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2866{
2867	return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi);
2868}
2869
2870/*
2871 * This function always allocates a used segment (from the dirty seglist) in
2872 * SSR manner, so it should recover the existing segment information of valid blocks.
2873 */
2874static int change_curseg(struct f2fs_sb_info *sbi, int type)
2875{
2876	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2877	struct curseg_info *curseg = CURSEG_I(sbi, type);
2878	unsigned int new_segno = curseg->next_segno;
2879	struct f2fs_summary_block *sum_node;
2880	struct page *sum_page;
2881
2882	write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2883
2884	__set_test_and_inuse(sbi, new_segno);
2885
2886	mutex_lock(&dirty_i->seglist_lock);
2887	__remove_dirty_segment(sbi, new_segno, PRE);
2888	__remove_dirty_segment(sbi, new_segno, DIRTY);
2889	mutex_unlock(&dirty_i->seglist_lock);
2890
2891	reset_curseg(sbi, type, 1);
2892	curseg->alloc_type = SSR;
2893	curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2894
2895	sum_page = f2fs_get_sum_page(sbi, new_segno);
2896	if (IS_ERR(sum_page)) {
2897		/* GC won't be able to use stale summary pages due to cp_error */
2898		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2899		return PTR_ERR(sum_page);
2900	}
2901	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2902	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2903	f2fs_put_page(sum_page, 1);
2904	return 0;
2905}
2906
2907static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2908				int alloc_mode, unsigned long long age);
2909
2910static int get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2911					int target_type, int alloc_mode,
2912					unsigned long long age)
2913{
2914	struct curseg_info *curseg = CURSEG_I(sbi, type);
2915	int ret = 0;
2916
2917	curseg->seg_type = target_type;
2918
2919	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2920		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2921
2922		curseg->seg_type = se->type;
2923		ret = change_curseg(sbi, type);
2924	} else {
2925		/* allocate cold segment by default */
2926		curseg->seg_type = CURSEG_COLD_DATA;
2927		ret = new_curseg(sbi, type, true);
2928	}
2929	stat_inc_seg_type(sbi, curseg);
2930	return ret;
2931}
2932
2933static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2934{
2935	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2936	int ret = 0;
2937
2938	if (!sbi->am.atgc_enabled)
2939		return 0;
2940
2941	f2fs_down_read(&SM_I(sbi)->curseg_lock);
2942
2943	mutex_lock(&curseg->curseg_mutex);
2944	down_write(&SIT_I(sbi)->sentry_lock);
2945
2946	ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC,
2947					CURSEG_COLD_DATA, SSR, 0);
2948
2949	up_write(&SIT_I(sbi)->sentry_lock);
2950	mutex_unlock(&curseg->curseg_mutex);
2951
2952	f2fs_up_read(&SM_I(sbi)->curseg_lock);
2953	return ret;
2954}
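
/*
 * Note (added annotation): of the in-memory logs, only the ATGC curseg
 * appears to need setup at mount time here; the pinned cold-data curseg
 * is seemingly initialized lazily on first use.
 */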
2955int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2956{
2957	return __f2fs_init_atgc_curseg(sbi);
2958}
2959
2960static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2961{
2962	struct curseg_info *curseg = CURSEG_I(sbi, type);
2963
2964	mutex_lock(&curseg->curseg_mutex);
2965	if (!curseg->inited)
2966		goto out;
2967
2968	if (get_valid_blocks(sbi, curseg->segno, false)) {
2969		write_sum_page(sbi, curseg->sum_blk,
2970				GET_SUM_BLOCK(sbi, curseg->segno));
2971	} else {
2972		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2973		__set_test_and_free(sbi, curseg->segno, true);
2974		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2975	}
2976out:
2977	mutex_unlock(&curseg->curseg_mutex);
2978}
2979
2980void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2981{
2982	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2983
2984	if (sbi->am.atgc_enabled)
2985		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2986}
2987
2988static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2989{
2990	struct curseg_info *curseg = CURSEG_I(sbi, type);
2991
2992	mutex_lock(&curseg->curseg_mutex);
2993	if (!curseg->inited)
2994		goto out;
2995	if (get_valid_blocks(sbi, curseg->segno, false))
2996		goto out;
2997
2998	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2999	__set_test_and_inuse(sbi, curseg->segno);
3000	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
3001out:
3002	mutex_unlock(&curseg->curseg_mutex);
3003}
3004
3005void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
3006{
3007	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
3008
3009	if (sbi->am.atgc_enabled)
3010		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
3011}
3012
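/*
 * Note (added annotation): SSR victim search order: first a victim of the
 * same temperature, then the other temperatures of the same kind (walking
 * cold-to-hot for warm/cold logs via 'reversed', hot-to-cold otherwise),
 * and finally, with checkpointing disabled, any dirty segment that has no
 * valid blocks left.
 */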
3013static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3014				int alloc_mode, unsigned long long age)
3015{
3016	struct curseg_info *curseg = CURSEG_I(sbi, type);
3017	unsigned segno = NULL_SEGNO;
3018	unsigned short seg_type = curseg->seg_type;
3019	int i, cnt;
3020	bool reversed = false;
3021
3022	sanity_check_seg_type(sbi, seg_type);
3023
3024	/* f2fs_need_SSR() already forces us to do this */
3025	if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
3026		curseg->next_segno = segno;
3027		return 1;
3028	}
3029
3030	/* For node segments, let's do SSR more intensively */
3031	if (IS_NODESEG(seg_type)) {
3032		if (seg_type >= CURSEG_WARM_NODE) {
3033			reversed = true;
3034			i = CURSEG_COLD_NODE;
3035		} else {
3036			i = CURSEG_HOT_NODE;
3037		}
3038		cnt = NR_CURSEG_NODE_TYPE;
3039	} else {
3040		if (seg_type >= CURSEG_WARM_DATA) {
3041			reversed = true;
3042			i = CURSEG_COLD_DATA;
3043		} else {
3044			i = CURSEG_HOT_DATA;
3045		}
3046		cnt = NR_CURSEG_DATA_TYPE;
3047	}
3048
3049	for (; cnt-- > 0; reversed ? i-- : i++) {
3050		if (i == seg_type)
3051			continue;
3052		if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
3053			curseg->next_segno = segno;
3054			return 1;
3055		}
3056	}
3057
3058	/* find valid_blocks=0 in dirty list */
3059	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3060		segno = get_free_segment(sbi);
3061		if (segno != NULL_SEGNO) {
3062			curseg->next_segno = segno;
3063			return 1;
3064		}
3065	}
3066	return 0;
3067}
3068
3069static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3070{
3071	struct curseg_info *curseg = CURSEG_I(sbi, type);
3072
3073	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3074	    curseg->seg_type == CURSEG_WARM_NODE)
3075		return true;
3076	if (curseg->alloc_type == LFS &&
3077	    is_next_segment_free(sbi, curseg, type) &&
3078	    likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3079		return true;
3080	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
3081		return true;
3082	return false;
3083}
3084
3085int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3086					unsigned int start, unsigned int end)
3087{
3088	struct curseg_info *curseg = CURSEG_I(sbi, type);
3089	unsigned int segno;
3090	int ret = 0;
3091
3092	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3093	mutex_lock(&curseg->curseg_mutex);
3094	down_write(&SIT_I(sbi)->sentry_lock);
3095
3096	segno = CURSEG_I(sbi, type)->segno;
3097	if (segno < start || segno > end)
3098		goto unlock;
3099
3100	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3101		ret = change_curseg(sbi, type);
3102	else
3103		ret = new_curseg(sbi, type, true);
3104
3105	stat_inc_seg_type(sbi, curseg);
3106
3107	locate_dirty_segment(sbi, segno);
3108unlock:
3109	up_write(&SIT_I(sbi)->sentry_lock);
3110
3111	if (segno != curseg->segno)
3112		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3113			    type, segno, curseg->segno);
3114
3115	mutex_unlock(&curseg->curseg_mutex);
3116	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3117	return ret;
3118}
3119
3120static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3121						bool new_sec, bool force)
3122{
3123	struct curseg_info *curseg = CURSEG_I(sbi, type);
3124	unsigned int old_segno;
3125	int err = 0;
3126
3127	if (type == CURSEG_COLD_DATA_PINNED && !curseg->inited)
3128		goto allocate;
3129
3130	if (!force && curseg->inited &&
3131	    !curseg->next_blkoff &&
3132	    !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3133	    !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3134		return 0;
3135
3136allocate:
3137	old_segno = curseg->segno;
3138	err = new_curseg(sbi, type, true);
3139	if (err)
3140		return err;
3141	stat_inc_seg_type(sbi, curseg);
3142	locate_dirty_segment(sbi, old_segno);
3143	return 0;
3144}
3145
3146int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3147{
3148	int ret;
3149
3150	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3151	down_write(&SIT_I(sbi)->sentry_lock);
3152	ret = __allocate_new_segment(sbi, type, true, force);
3153	up_write(&SIT_I(sbi)->sentry_lock);
3154	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3155
3156	return ret;
3157}
3158
3159int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi)
3160{
3161	int err;
3162	bool gc_required = true;
3163
3164retry:
3165	f2fs_lock_op(sbi);
3166	err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3167	f2fs_unlock_op(sbi);
3168
3169	if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
3170		f2fs_down_write(&sbi->gc_lock);
3171		err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), true, 1);
3172		f2fs_up_write(&sbi->gc_lock);
3173
3174		gc_required = false;
3175		if (!err)
3176			goto retry;
3177	}
3178
3179	return err;
3180}
3181
3182int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3183{
3184	int i;
3185	int err = 0;
3186
3187	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3188	down_write(&SIT_I(sbi)->sentry_lock);
3189	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
3190		err += __allocate_new_segment(sbi, i, false, false);
3191	up_write(&SIT_I(sbi)->sentry_lock);
3192	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3193
3194	return err;
3195}
3196
3197bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3198						struct cp_control *cpc)
3199{
3200	__u64 trim_start = cpc->trim_start;
3201	bool has_candidate = false;
3202
3203	down_write(&SIT_I(sbi)->sentry_lock);
3204	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
3205		if (add_discard_addrs(sbi, cpc, true)) {
3206			has_candidate = true;
3207			break;
3208		}
3209	}
3210	up_write(&SIT_I(sbi)->sentry_lock);
3211
3212	cpc->trim_start = trim_start;
3213	return has_candidate;
3214}
3215
3216static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3217					struct discard_policy *dpolicy,
3218					unsigned int start, unsigned int end)
3219{
3220	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3221	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3222	struct rb_node **insert_p = NULL, *insert_parent = NULL;
3223	struct discard_cmd *dc;
3224	struct blk_plug plug;
3225	int issued;
3226	unsigned int trimmed = 0;
3227
3228next:
3229	issued = 0;
3230
3231	mutex_lock(&dcc->cmd_lock);
3232	if (unlikely(dcc->rbtree_check))
3233		f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3234
3235	dc = __lookup_discard_cmd_ret(&dcc->root, start,
3236				&prev_dc, &next_dc, &insert_p, &insert_parent);
3237	if (!dc)
3238		dc = next_dc;
3239
3240	blk_start_plug(&plug);
3241
3242	while (dc && dc->di.lstart <= end) {
3243		struct rb_node *node;
3244		int err = 0;
3245
3246		if (dc->di.len < dpolicy->granularity)
3247			goto skip;
3248
3249		if (dc->state != D_PREP) {
3250			list_move_tail(&dc->list, &dcc->fstrim_list);
3251			goto skip;
3252		}
3253
3254		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3255
3256		if (issued >= dpolicy->max_requests) {
3257			start = dc->di.lstart + dc->di.len;
3258
3259			if (err)
3260				__remove_discard_cmd(sbi, dc);
3261
3262			blk_finish_plug(&plug);
3263			mutex_unlock(&dcc->cmd_lock);
3264			trimmed += __wait_all_discard_cmd(sbi, NULL);
3265			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
3266			goto next;
3267		}
3268skip:
3269		node = rb_next(&dc->rb_node);
3270		if (err)
3271			__remove_discard_cmd(sbi, dc);
3272		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3273
3274		if (fatal_signal_pending(current))
3275			break;
3276	}
3277
3278	blk_finish_plug(&plug);
3279	mutex_unlock(&dcc->cmd_lock);
3280
3281	return trimmed;
3282}
3283
3284int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3285{
3286	__u64 start = F2FS_BYTES_TO_BLK(range->start);
3287	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3288	unsigned int start_segno, end_segno;
3289	block_t start_block, end_block;
3290	struct cp_control cpc;
3291	struct discard_policy dpolicy;
3292	unsigned long long trimmed = 0;
3293	int err = 0;
3294	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3295
3296	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3297		return -EINVAL;
3298
3299	if (end < MAIN_BLKADDR(sbi))
3300		goto out;
3301
3302	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3303		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3304		return -EFSCORRUPTED;
3305	}
3306
3307	/* start/end segment number in main_area */
3308	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3309	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3310						GET_SEGNO(sbi, end);
3311	if (need_align) {
3312		start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi));
3313		end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1;
3314	}
3315
3316	cpc.reason = CP_DISCARD;
3317	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3318	cpc.trim_start = start_segno;
3319	cpc.trim_end = end_segno;
3320
3321	if (sbi->discard_blks == 0)
3322		goto out;
3323
3324	f2fs_down_write(&sbi->gc_lock);
3325	stat_inc_cp_call_count(sbi, TOTAL_CALL);
3326	err = f2fs_write_checkpoint(sbi, &cpc);
3327	f2fs_up_write(&sbi->gc_lock);
3328	if (err)
3329		goto out;
3330
3331	/*
3332	 * We queued discard candidates, but we don't actually need to wait for
3333	 * all of them here, since they'll be issued during idle time by the
3334	 * runtime discard option. The user's configuration suggests relying on
3335	 * runtime discard or periodic fstrim instead.
3336	 */
3337	if (f2fs_realtime_discard_enable(sbi))
3338		goto out;
3339
3340	start_block = START_BLOCK(sbi, start_segno);
3341	end_block = START_BLOCK(sbi, end_segno + 1);
3342
3343	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3344	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3345					start_block, end_block);
3346
3347	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3348					start_block, end_block);
3349out:
3350	if (!err)
3351		range->len = F2FS_BLK_TO_BYTES(trimmed);
3352	return err;
3353}
3354
3355int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3356{
3357	switch (hint) {
3358	case WRITE_LIFE_SHORT:
3359		return CURSEG_HOT_DATA;
3360	case WRITE_LIFE_EXTREME:
3361		return CURSEG_COLD_DATA;
3362	default:
3363		return CURSEG_WARM_DATA;
3364	}
3365}
3366
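/*
 * Note (added annotation): which of the helpers below is used depends on
 * the "active_logs" mount option (2, 4, or 6 open logs), e.g.
 * (paths illustrative):
 *
 *	mount -t f2fs -o active_logs=4 /dev/sdb1 /mnt
 *
 * Fewer logs mean fewer open segments but coarser hot/cold separation.
 */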
3367static int __get_segment_type_2(struct f2fs_io_info *fio)
3368{
3369	if (fio->type == DATA)
3370		return CURSEG_HOT_DATA;
3371	else
3372		return CURSEG_HOT_NODE;
3373}
3374
3375static int __get_segment_type_4(struct f2fs_io_info *fio)
3376{
3377	if (fio->type == DATA) {
3378		struct inode *inode = fio->page->mapping->host;
3379
3380		if (S_ISDIR(inode->i_mode))
3381			return CURSEG_HOT_DATA;
3382		else
3383			return CURSEG_COLD_DATA;
3384	} else {
3385		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3386			return CURSEG_WARM_NODE;
3387		else
3388			return CURSEG_COLD_NODE;
3389	}
3390}
3391
3392static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
3393{
3394	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3395	struct extent_info ei = {};
3396
3397	if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
3398		if (!ei.age)
3399			return NO_CHECK_TYPE;
3400		if (ei.age <= sbi->hot_data_age_threshold)
3401			return CURSEG_HOT_DATA;
3402		if (ei.age <= sbi->warm_data_age_threshold)
3403			return CURSEG_WARM_DATA;
3404		return CURSEG_COLD_DATA;
3405	}
3406	return NO_CHECK_TYPE;
3407}
3408
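/*
 * Note (added annotation): with six logs, data temperature is decided in
 * priority order: pinned (aligned) writes, pages being moved by GC (ATGC
 * if enabled, else cold), cold/compressed files, the block-age extent
 * hint above, hot files, and finally the inode's write hint.
 */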
3409static int __get_segment_type_6(struct f2fs_io_info *fio)
3410{
3411	if (fio->type == DATA) {
3412		struct inode *inode = fio->page->mapping->host;
3413		int type;
3414
3415		if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3416			return CURSEG_COLD_DATA_PINNED;
3417
3418		if (page_private_gcing(fio->page)) {
3419			if (fio->sbi->am.atgc_enabled &&
3420				(fio->io_type == FS_DATA_IO) &&
3421				(fio->sbi->gc_mode != GC_URGENT_HIGH))
3422				return CURSEG_ALL_DATA_ATGC;
3423			else
3424				return CURSEG_COLD_DATA;
3425		}
3426		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3427			return CURSEG_COLD_DATA;
3428
3429		type = __get_age_segment_type(inode, fio->page->index);
3430		if (type != NO_CHECK_TYPE)
3431			return type;
3432
3433		if (file_is_hot(inode) ||
3434				is_inode_flag_set(inode, FI_HOT_DATA) ||
3435				f2fs_is_cow_file(inode))
3436			return CURSEG_HOT_DATA;
3437		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3438	} else {
3439		if (IS_DNODE(fio->page))
3440			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3441						CURSEG_HOT_NODE;
3442		return CURSEG_COLD_NODE;
3443	}
3444}
3445
3446static int __get_segment_type(struct f2fs_io_info *fio)
3447{
3448	int type = 0;
3449
3450	switch (F2FS_OPTION(fio->sbi).active_logs) {
3451	case 2:
3452		type = __get_segment_type_2(fio);
3453		break;
3454	case 4:
3455		type = __get_segment_type_4(fio);
3456		break;
3457	case 6:
3458		type = __get_segment_type_6(fio);
3459		break;
3460	default:
3461		f2fs_bug_on(fio->sbi, true);
3462	}
3463
3464	if (IS_HOT(type))
3465		fio->temp = HOT;
3466	else if (IS_WARM(type))
3467		fio->temp = WARM;
3468	else
3469		fio->temp = COLD;
3470	return type;
3471}
3472
3473static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3474		struct curseg_info *seg)
3475{
3476	/* To allocate block chunks in different sizes, use random numbers */
3477	if (--seg->fragment_remained_chunk > 0)
3478		return;
3479
3480	seg->fragment_remained_chunk =
3481		get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3482	seg->next_blkoff +=
3483		get_random_u32_inclusive(1, sbi->max_fragment_hole);
3484}
3485
3486static void reset_curseg_fields(struct curseg_info *curseg)
3487{
3488	curseg->inited = false;
3489	curseg->segno = NULL_SEGNO;
3490	curseg->next_segno = 0;
3491}
3492
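/*
 * Note (added annotation): main block-allocation path. Lock order is
 * curseg_lock (read) -> curseg_mutex -> sentry_lock (write). The summary
 * entry is recorded, SIT entries for the new and old block are updated,
 * and if the segment fills up it is replaced in place, either by a fresh
 * LFS segment or by an SSR victim.
 */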
3493int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3494		block_t old_blkaddr, block_t *new_blkaddr,
3495		struct f2fs_summary *sum, int type,
3496		struct f2fs_io_info *fio)
3497{
3498	struct sit_info *sit_i = SIT_I(sbi);
3499	struct curseg_info *curseg = CURSEG_I(sbi, type);
3500	unsigned long long old_mtime;
3501	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3502	struct seg_entry *se = NULL;
3503	bool segment_full = false;
3504	int ret = 0;
3505
3506	f2fs_down_read(&SM_I(sbi)->curseg_lock);
3507
3508	mutex_lock(&curseg->curseg_mutex);
3509	down_write(&sit_i->sentry_lock);
3510
3511	if (curseg->segno == NULL_SEGNO) {
3512		ret = -ENOSPC;
3513		goto out_err;
3514	}
3515
3516	if (from_gc) {
3517		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3518		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3519		sanity_check_seg_type(sbi, se->type);
3520		f2fs_bug_on(sbi, IS_NODESEG(se->type));
3521	}
3522	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3523
3524	f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi));
3525
3526	f2fs_wait_discard_bio(sbi, *new_blkaddr);
3527
3528	curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3529	if (curseg->alloc_type == SSR) {
3530		curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3531	} else {
3532		curseg->next_blkoff++;
3533		if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3534			f2fs_randomize_chunk(sbi, curseg);
3535	}
3536	if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3537		segment_full = true;
3538	stat_inc_block_count(sbi, curseg);
3539
3540	if (from_gc) {
3541		old_mtime = get_segment_mtime(sbi, old_blkaddr);
3542	} else {
3543		update_segment_mtime(sbi, old_blkaddr, 0);
3544		old_mtime = 0;
3545	}
3546	update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3547
3548	/*
3549	 * SIT information should be updated before segment allocation,
3550	 * since SSR needs the latest valid block information.
3551	 */
3552	update_sit_entry(sbi, *new_blkaddr, 1);
3553	update_sit_entry(sbi, old_blkaddr, -1);
3554
3555	/*
3556	 * If the current segment is full, flush it out and replace it with a
3557	 * new segment.
3558	 */
3559	if (segment_full) {
3560		if (type == CURSEG_COLD_DATA_PINNED &&
3561		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
3562			write_sum_page(sbi, curseg->sum_blk,
3563					GET_SUM_BLOCK(sbi, curseg->segno));
3564			reset_curseg_fields(curseg);
3565			goto skip_new_segment;
3566		}
3567
3568		if (from_gc) {
3569			ret = get_atssr_segment(sbi, type, se->type,
3570						AT_SSR, se->mtime);
3571		} else {
3572			if (need_new_seg(sbi, type))
3573				ret = new_curseg(sbi, type, false);
3574			else
3575				ret = change_curseg(sbi, type);
3576			stat_inc_seg_type(sbi, curseg);
3577		}
3578
3579		if (ret)
3580			goto out_err;
3581	}
3582
3583skip_new_segment:
3584	/*
3585	 * Segment dirty status should be updated after segment allocation,
3586	 * so we only need to update the status once, after the previous
3587	 * segment has been closed.
3588	 */
3589	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3590	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3591
3592	if (IS_DATASEG(curseg->seg_type))
3593		atomic64_inc(&sbi->allocated_data_blocks);
3594
3595	up_write(&sit_i->sentry_lock);
3596
3597	if (page && IS_NODESEG(curseg->seg_type)) {
3598		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3599
3600		f2fs_inode_chksum_set(sbi, page);
3601	}
3602
3603	if (fio) {
3604		struct f2fs_bio_info *io;
3605
3606		INIT_LIST_HEAD(&fio->list);
3607		fio->in_list = 1;
3608		io = sbi->write_io[fio->type] + fio->temp;
3609		spin_lock(&io->io_lock);
3610		list_add_tail(&fio->list, &io->io_list);
3611		spin_unlock(&io->io_lock);
3612	}
3613
3614	mutex_unlock(&curseg->curseg_mutex);
3615	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3616	return 0;
3617out_err:
3618	*new_blkaddr = NULL_ADDR;
3619	up_write(&sit_i->sentry_lock);
3620	mutex_unlock(&curseg->curseg_mutex);
3621	f2fs_up_read(&SM_I(sbi)->curseg_lock);
3622	return ret;
3623
3624}
3625
3626void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3627					block_t blkaddr, unsigned int blkcnt)
3628{
3629	if (!f2fs_is_multi_device(sbi))
3630		return;
3631
3632	while (1) {
3633		unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3634		unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
3635
3636		/* update device state for fsync */
3637		f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3638
3639		/* update device state for checkpoint */
3640		if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3641			spin_lock(&sbi->dev_lock);
3642			f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3643			spin_unlock(&sbi->dev_lock);
3644		}
3645
3646		if (blkcnt <= blks)
3647			break;
3648		blkcnt -= blks;
3649		blkaddr += blks;
3650	}
3651}
3652
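/*
 * Note (added annotation): in LFS mode, cold data writes take
 * io_order_lock, presumably so that block allocation order matches bio
 * submission order and the append-only layout is preserved.
 */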
3653static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3654{
3655	int type = __get_segment_type(fio);
3656	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3657
3658	if (keep_order)
3659		f2fs_down_read(&fio->sbi->io_order_lock);
3660
3661	if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3662			&fio->new_blkaddr, sum, type, fio)) {
3663		if (fscrypt_inode_uses_fs_layer_crypto(fio->page->mapping->host))
3664			fscrypt_finalize_bounce_page(&fio->encrypted_page);
3665		if (PageWriteback(fio->page))
3666			end_page_writeback(fio->page);
3667		if (f2fs_in_warm_node_list(fio->sbi, fio->page))
3668			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
3669		goto out;
3670	}
3671	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3672		f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3673
3674	/* write out the dirty page to the bdev */
3675	f2fs_submit_page_write(fio);
3676
3677	f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3678out:
3679	if (keep_order)
3680		f2fs_up_read(&fio->sbi->io_order_lock);
3681}
3682
3683void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3684					enum iostat_type io_type)
3685{
3686	struct f2fs_io_info fio = {
3687		.sbi = sbi,
3688		.type = META,
3689		.temp = HOT,
3690		.op = REQ_OP_WRITE,
3691		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3692		.old_blkaddr = page->index,
3693		.new_blkaddr = page->index,
3694		.page = page,
3695		.encrypted_page = NULL,
3696		.in_list = 0,
3697	};
3698
3699	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3700		fio.op_flags &= ~REQ_META;
3701
3702	set_page_writeback(page);
3703	f2fs_submit_page_write(&fio);
3704
3705	stat_inc_meta_count(sbi, page->index);
3706	f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3707}
3708
3709void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3710{
3711	struct f2fs_summary sum;
3712
3713	set_summary(&sum, nid, 0, 0);
3714	do_write_page(&sum, fio);
3715
3716	f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3717}
3718
3719void f2fs_outplace_write_data(struct dnode_of_data *dn,
3720					struct f2fs_io_info *fio)
3721{
3722	struct f2fs_sb_info *sbi = fio->sbi;
3723	struct f2fs_summary sum;
3724
3725	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3726	if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO)
3727		f2fs_update_age_extent_cache(dn);
3728	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3729	do_write_page(&sum, fio);
3730	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3731
3732	f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3733}
3734
3735int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3736{
3737	int err;
3738	struct f2fs_sb_info *sbi = fio->sbi;
3739	unsigned int segno;
3740
3741	fio->new_blkaddr = fio->old_blkaddr;
3742	/* i/o temperature is needed for passing down write hints */
3743	__get_segment_type(fio);
3744
3745	segno = GET_SEGNO(sbi, fio->new_blkaddr);
3746
3747	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3748		set_sbi_flag(sbi, SBI_NEED_FSCK);
3749		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3750			  __func__, segno);
3751		err = -EFSCORRUPTED;
3752		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
3753		goto drop_bio;
3754	}
3755
3756	if (f2fs_cp_error(sbi)) {
3757		err = -EIO;
3758		goto drop_bio;
3759	}
3760
3761	if (fio->post_read)
3762		f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
3763
3764	stat_inc_inplace_blocks(fio->sbi);
3765
3766	if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
3767		err = f2fs_merge_page_bio(fio);
3768	else
3769		err = f2fs_submit_page_bio(fio);
3770	if (!err) {
3771		f2fs_update_device_state(fio->sbi, fio->ino,
3772						fio->new_blkaddr, 1);
3773		f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
3774						fio->io_type, F2FS_BLKSIZE);
3775	}
3776
3777	return err;
3778drop_bio:
3779	if (fio->bio && *(fio->bio)) {
3780		struct bio *bio = *(fio->bio);
3781
3782		bio->bi_status = BLK_STS_IOERR;
3783		bio_endio(bio);
3784		*(fio->bio) = NULL;
3785	}
3786	return err;
3787}
3788
3789static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3790						unsigned int segno)
3791{
3792	int i;
3793
3794	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3795		if (CURSEG_I(sbi, i)->segno == segno)
3796			break;
3797	}
3798	return i;
3799}
3800
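/*
 * Note (added annotation): used by recovery and GC to place a block at an
 * explicit address: the matching curseg is temporarily re-pointed at the
 * target segment, the summary and SIT entries are rewritten, and, when
 * recover_curseg is set, the original curseg position is restored
 * afterwards.
 */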
3801void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3802				block_t old_blkaddr, block_t new_blkaddr,
3803				bool recover_curseg, bool recover_newaddr,
3804				bool from_gc)
3805{
3806	struct sit_info *sit_i = SIT_I(sbi);
3807	struct curseg_info *curseg;
3808	unsigned int segno, old_cursegno;
3809	struct seg_entry *se;
3810	int type;
3811	unsigned short old_blkoff;
3812	unsigned char old_alloc_type;
3813
3814	segno = GET_SEGNO(sbi, new_blkaddr);
3815	se = get_seg_entry(sbi, segno);
3816	type = se->type;
3817
3818	f2fs_down_write(&SM_I(sbi)->curseg_lock);
3819
3820	if (!recover_curseg) {
3821		/* for recovery flow */
3822		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3823			if (old_blkaddr == NULL_ADDR)
3824				type = CURSEG_COLD_DATA;
3825			else
3826				type = CURSEG_WARM_DATA;
3827		}
3828	} else {
3829		if (IS_CURSEG(sbi, segno)) {
3830			/* se->type is volatile due to SSR allocation */
3831			type = __f2fs_get_curseg(sbi, segno);
3832			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3833		} else {
3834			type = CURSEG_WARM_DATA;
3835		}
3836	}
3837
3838	f2fs_bug_on(sbi, !IS_DATASEG(type));
3839	curseg = CURSEG_I(sbi, type);
3840
3841	mutex_lock(&curseg->curseg_mutex);
3842	down_write(&sit_i->sentry_lock);
3843
3844	old_cursegno = curseg->segno;
3845	old_blkoff = curseg->next_blkoff;
3846	old_alloc_type = curseg->alloc_type;
3847
3848	/* change the current segment */
3849	if (segno != curseg->segno) {
3850		curseg->next_segno = segno;
3851		if (change_curseg(sbi, type))
3852			goto out_unlock;
3853	}
3854
3855	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3856	curseg->sum_blk->entries[curseg->next_blkoff] = *sum;
3857
3858	if (!recover_curseg || recover_newaddr) {
3859		if (!from_gc)
3860			update_segment_mtime(sbi, new_blkaddr, 0);
3861		update_sit_entry(sbi, new_blkaddr, 1);
3862	}
3863	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3864		f2fs_invalidate_internal_cache(sbi, old_blkaddr);
3865		if (!from_gc)
3866			update_segment_mtime(sbi, old_blkaddr, 0);
3867		update_sit_entry(sbi, old_blkaddr, -1);
3868	}
3869
3870	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3871	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3872
3873	locate_dirty_segment(sbi, old_cursegno);
3874
3875	if (recover_curseg) {
3876		if (old_cursegno != curseg->segno) {
3877			curseg->next_segno = old_cursegno;
3878			if (change_curseg(sbi, type))
3879				goto out_unlock;
3880		}
3881		curseg->next_blkoff = old_blkoff;
3882		curseg->alloc_type = old_alloc_type;
3883	}
3884
3885out_unlock:
3886	up_write(&sit_i->sentry_lock);
3887	mutex_unlock(&curseg->curseg_mutex);
3888	f2fs_up_write(&SM_I(sbi)->curseg_lock);
3889}
3890
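/*
 * Editor's note: f2fs_do_replace_block() above takes its locks in the
 * order curseg_lock (write) -> curseg->curseg_mutex -> sentry_lock
 * (write) and releases them in reverse; callers are expected to hold
 * none of them when invoking it.
 */
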
3891void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3892				block_t old_addr, block_t new_addr,
3893				unsigned char version, bool recover_curseg,
3894				bool recover_newaddr)
3895{
3896	struct f2fs_summary sum;
3897
3898	set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3899
3900	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3901					recover_curseg, recover_newaddr, false);
3902
3903	f2fs_update_data_blkaddr(dn, new_addr);
3904}
3905
3906void f2fs_wait_on_page_writeback(struct page *page,
3907				enum page_type type, bool ordered, bool locked)
3908{
3909	if (PageWriteback(page)) {
3910		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3911
3912		/* submit cached LFS IO */
3913		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3914		/* submit cached IPU IO */
3915		f2fs_submit_merged_ipu_write(sbi, NULL, page);
3916		if (ordered) {
3917			wait_on_page_writeback(page);
3918			f2fs_bug_on(sbi, locked && PageWriteback(page));
3919		} else {
3920			wait_for_stable_page(page);
3921		}
3922	}
3923}
3924
3925void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3926{
3927	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3928	struct page *cpage;
3929
3930	if (!f2fs_post_read_required(inode))
3931		return;
3932
3933	if (!__is_valid_data_blkaddr(blkaddr))
3934		return;
3935
3936	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3937	if (cpage) {
3938		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3939		f2fs_put_page(cpage, 1);
3940	}
3941}
3942
3943void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3944								block_t len)
3945{
3946	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3947	block_t i;
3948
3949	if (!f2fs_post_read_required(inode))
3950		return;
3951
3952	for (i = 0; i < len; i++)
3953		f2fs_wait_on_block_writeback(inode, blkaddr + i);
3954
3955	f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
3956}
3957
3958static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3959{
3960	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3961	struct curseg_info *seg_i;
3962	unsigned char *kaddr;
3963	struct page *page;
3964	block_t start;
3965	int i, j, offset;
3966
3967	start = start_sum_block(sbi);
3968
3969	page = f2fs_get_meta_page(sbi, start++);
3970	if (IS_ERR(page))
3971		return PTR_ERR(page);
3972	kaddr = (unsigned char *)page_address(page);
3973
3974	/* Step 1: restore nat cache */
3975	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3976	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3977
3978	/* Step 2: restore sit cache */
3979	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3980	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3981	offset = 2 * SUM_JOURNAL_SIZE;
3982
3983	/* Step 3: restore summary entries */
3984	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3985		unsigned short blk_off;
3986		unsigned int segno;
3987
3988		seg_i = CURSEG_I(sbi, i);
3989		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3990		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3991		seg_i->next_segno = segno;
3992		reset_curseg(sbi, i, 0);
3993		seg_i->alloc_type = ckpt->alloc_type[i];
3994		seg_i->next_blkoff = blk_off;
3995
3996		if (seg_i->alloc_type == SSR)
3997			blk_off = BLKS_PER_SEG(sbi);
3998
3999		for (j = 0; j < blk_off; j++) {
4000			struct f2fs_summary *s;
4001
4002			s = (struct f2fs_summary *)(kaddr + offset);
4003			seg_i->sum_blk->entries[j] = *s;
4004			offset += SUMMARY_SIZE;
4005			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
4006						SUM_FOOTER_SIZE)
4007				continue;
4008
4009			f2fs_put_page(page, 1);
4010			page = NULL;
4011
4012			page = f2fs_get_meta_page(sbi, start++);
4013			if (IS_ERR(page))
4014				return PTR_ERR(page);
4015			kaddr = (unsigned char *)page_address(page);
4016			offset = 0;
4017		}
4018	}
4019	f2fs_put_page(page, 1);
4020	return 0;
4021}
4022
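/*
 * Editor's sketch of the compacted summary layout consumed above,
 * inferred from the offset arithmetic in read_compacted_summaries():
 *
 *   block 0:     [ NAT journal | SIT journal | packed f2fs_summary... ]
 *   block 1..n:  [ packed f2fs_summary entries...                    ]
 *
 * Entries are SUMMARY_SIZE bytes each and never cross into the last
 * SUM_FOOTER_SIZE bytes of a page; when the next entry would, reading
 * continues at offset 0 of the following meta page.
 */
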
4023static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
4024{
4025	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4026	struct f2fs_summary_block *sum;
4027	struct curseg_info *curseg;
4028	struct page *new;
4029	unsigned short blk_off;
4030	unsigned int segno = 0;
4031	block_t blk_addr = 0;
4032	int err = 0;
4033
4034	/* get segment number and block addr */
4035	if (IS_DATASEG(type)) {
4036		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
4037		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
4038							CURSEG_HOT_DATA]);
4039		if (__exist_node_summaries(sbi))
4040			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
4041		else
4042			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
4043	} else {
4044		segno = le32_to_cpu(ckpt->cur_node_segno[type -
4045							CURSEG_HOT_NODE]);
4046		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
4047							CURSEG_HOT_NODE]);
4048		if (__exist_node_summaries(sbi))
4049			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
4050							type - CURSEG_HOT_NODE);
4051		else
4052			blk_addr = GET_SUM_BLOCK(sbi, segno);
4053	}
4054
4055	new = f2fs_get_meta_page(sbi, blk_addr);
4056	if (IS_ERR(new))
4057		return PTR_ERR(new);
4058	sum = (struct f2fs_summary_block *)page_address(new);
4059
4060	if (IS_NODESEG(type)) {
4061		if (__exist_node_summaries(sbi)) {
4062			struct f2fs_summary *ns = &sum->entries[0];
4063			int i;
4064
4065			for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) {
4066				ns->version = 0;
4067				ns->ofs_in_node = 0;
4068			}
4069		} else {
4070			err = f2fs_restore_node_summary(sbi, segno, sum);
4071			if (err)
4072				goto out;
4073		}
4074	}
4075
4076	/* set the incomplete segment as the current segment */
4077	curseg = CURSEG_I(sbi, type);
4078	mutex_lock(&curseg->curseg_mutex);
4079
4080	/* update journal info */
4081	down_write(&curseg->journal_rwsem);
4082	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
4083	up_write(&curseg->journal_rwsem);
4084
4085	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
4086	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
4087	curseg->next_segno = segno;
4088	reset_curseg(sbi, type, 0);
4089	curseg->alloc_type = ckpt->alloc_type[type];
4090	curseg->next_blkoff = blk_off;
4091	mutex_unlock(&curseg->curseg_mutex);
4092out:
4093	f2fs_put_page(new, 1);
4094	return err;
4095}
4096
4097static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4098{
4099	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
4100	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4101	int type = CURSEG_HOT_DATA;
4102	int err;
4103
4104	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4105		int npages = f2fs_npages_for_summary_flush(sbi, true);
4106
4107		if (npages >= 2)
4108			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4109							META_CP, true);
4110
4111		/* restore for compacted data summary */
4112		err = read_compacted_summaries(sbi);
4113		if (err)
4114			return err;
4115		type = CURSEG_HOT_NODE;
4116	}
4117
4118	if (__exist_node_summaries(sbi))
4119		f2fs_ra_meta_pages(sbi,
4120				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4121				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
4122
4123	for (; type <= CURSEG_COLD_NODE; type++) {
4124		err = read_normal_summaries(sbi, type);
4125		if (err)
4126			return err;
4127	}
4128
4129	/* sanity check for summary blocks */
4130	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
4131			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
4132		f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4133			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
4134		return -EINVAL;
4135	}
4136
4137	return 0;
4138}
4139
4140static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4141{
4142	struct page *page;
4143	unsigned char *kaddr;
4144	struct f2fs_summary *summary;
4145	struct curseg_info *seg_i;
4146	int written_size = 0;
4147	int i, j;
4148
4149	page = f2fs_grab_meta_page(sbi, blkaddr++);
4150	kaddr = (unsigned char *)page_address(page);
4151	memset(kaddr, 0, PAGE_SIZE);
4152
4153	/* Step 1: write nat cache */
4154	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4155	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
4156	written_size += SUM_JOURNAL_SIZE;
4157
4158	/* Step 2: write sit cache */
4159	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4160	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
4161	written_size += SUM_JOURNAL_SIZE;
4162
4163	/* Step 3: write summary entries */
4164	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
4165		seg_i = CURSEG_I(sbi, i);
4166		for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4167			if (!page) {
4168				page = f2fs_grab_meta_page(sbi, blkaddr++);
4169				kaddr = (unsigned char *)page_address(page);
4170				memset(kaddr, 0, PAGE_SIZE);
4171				written_size = 0;
4172			}
4173			summary = (struct f2fs_summary *)(kaddr + written_size);
4174			*summary = seg_i->sum_blk->entries[j];
4175			written_size += SUMMARY_SIZE;
4176
4177			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
4178							SUM_FOOTER_SIZE)
4179				continue;
4180
4181			set_page_dirty(page);
4182			f2fs_put_page(page, 1);
4183			page = NULL;
4184		}
4185	}
4186	if (page) {
4187		set_page_dirty(page);
4188		f2fs_put_page(page, 1);
4189	}
4190}
4191
4192static void write_normal_summaries(struct f2fs_sb_info *sbi,
4193					block_t blkaddr, int type)
4194{
4195	int i, end;
4196
4197	if (IS_DATASEG(type))
4198		end = type + NR_CURSEG_DATA_TYPE;
4199	else
4200		end = type + NR_CURSEG_NODE_TYPE;
4201
4202	for (i = type; i < end; i++)
4203		write_current_sum_page(sbi, i, blkaddr + (i - type));
4204}
4205
4206void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4207{
4208	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4209		write_compacted_summaries(sbi, start_blk);
4210	else
4211		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4212}
4213
4214void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4215{
4216	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4217}
4218
4219int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4220					unsigned int val, int alloc)
4221{
4222	int i;
4223
4224	if (type == NAT_JOURNAL) {
4225		for (i = 0; i < nats_in_cursum(journal); i++) {
4226			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4227				return i;
4228		}
4229		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4230			return update_nats_in_cursum(journal, 1);
4231	} else if (type == SIT_JOURNAL) {
4232		for (i = 0; i < sits_in_cursum(journal); i++)
4233			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4234				return i;
4235		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4236			return update_sits_in_cursum(journal, 1);
4237	}
4238	return -1;
4239}
4240
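/*
 * Illustrative sketch (editor's addition, never compiled): typical
 * lookup-or-allocate use of f2fs_lookup_journal_in_cursum(), modeled
 * on the SIT flush path in f2fs_flush_sit_entries() below.
 */
#if 0
static int example_journal_sit_entry(struct f2fs_journal *journal,
				     unsigned int segno)
{
	/* find segno's slot; grow the journal by one entry if absent */
	int offset = f2fs_lookup_journal_in_cursum(journal, SIT_JOURNAL,
						   segno, 1);

	if (offset < 0)		/* no entry and no cursum space left */
		return -ENOSPC;

	segno_in_journal(journal, offset) = cpu_to_le32(segno);
	return 0;
}
#endif
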
4241static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4242					unsigned int segno)
4243{
4244	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4245}
4246
4247static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4248					unsigned int start)
4249{
4250	struct sit_info *sit_i = SIT_I(sbi);
4251	struct page *page;
4252	pgoff_t src_off, dst_off;
4253
4254	src_off = current_sit_addr(sbi, start);
4255	dst_off = next_sit_addr(sbi, src_off);
4256
4257	page = f2fs_grab_meta_page(sbi, dst_off);
4258	seg_info_to_sit_page(sbi, page, start);
4259
4260	set_page_dirty(page);
4261	set_to_next_sit(sit_i, start);
4262
4263	return page;
4264}
4265
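/*
 * Editor's note: SIT blocks are kept in two copies. current_sit_addr()
 * returns the copy valid as of the last checkpoint, next_sit_addr()
 * the alternate location, and set_to_next_sit() flips the per-block
 * bit so the freshly written copy becomes current at the next
 * checkpoint. This is also why build_sit_info() halves
 * segment_count_sit when computing sit_segs.
 */
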
4266static struct sit_entry_set *grab_sit_entry_set(void)
4267{
4268	struct sit_entry_set *ses =
4269			f2fs_kmem_cache_alloc(sit_entry_set_slab,
4270						GFP_NOFS, true, NULL);
4271
4272	ses->entry_cnt = 0;
4273	INIT_LIST_HEAD(&ses->set_list);
4274	return ses;
4275}
4276
4277static void release_sit_entry_set(struct sit_entry_set *ses)
4278{
4279	list_del(&ses->set_list);
4280	kmem_cache_free(sit_entry_set_slab, ses);
4281}
4282
4283static void adjust_sit_entry_set(struct sit_entry_set *ses,
4284						struct list_head *head)
4285{
4286	struct sit_entry_set *next = ses;
4287
4288	if (list_is_last(&ses->set_list, head))
4289		return;
4290
4291	list_for_each_entry_continue(next, head, set_list)
4292		if (ses->entry_cnt <= next->entry_cnt) {
4293			list_move_tail(&ses->set_list, &next->set_list);
4294			return;
4295		}
4296
4297	list_move_tail(&ses->set_list, head);
4298}
4299
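/*
 * Editor's note: adjust_sit_entry_set() keeps the set list sorted by
 * ascending entry_cnt, so f2fs_flush_sit_entries() meets the smallest
 * sets first and can journal as many of them as the cursum space
 * allows before falling back to writing full SIT pages.
 */
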
4300static void add_sit_entry(unsigned int segno, struct list_head *head)
4301{
4302	struct sit_entry_set *ses;
4303	unsigned int start_segno = START_SEGNO(segno);
4304
4305	list_for_each_entry(ses, head, set_list) {
4306		if (ses->start_segno == start_segno) {
4307			ses->entry_cnt++;
4308			adjust_sit_entry_set(ses, head);
4309			return;
4310		}
4311	}
4312
4313	ses = grab_sit_entry_set();
4314
4315	ses->start_segno = start_segno;
4316	ses->entry_cnt++;
4317	list_add(&ses->set_list, head);
4318}
4319
4320static void add_sits_in_set(struct f2fs_sb_info *sbi)
4321{
4322	struct f2fs_sm_info *sm_info = SM_I(sbi);
4323	struct list_head *set_list = &sm_info->sit_entry_set;
4324	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4325	unsigned int segno;
4326
4327	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4328		add_sit_entry(segno, set_list);
4329}
4330
4331static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4332{
4333	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4334	struct f2fs_journal *journal = curseg->journal;
4335	int i;
4336
4337	down_write(&curseg->journal_rwsem);
4338	for (i = 0; i < sits_in_cursum(journal); i++) {
4339		unsigned int segno;
4340		bool dirtied;
4341
4342		segno = le32_to_cpu(segno_in_journal(journal, i));
4343		dirtied = __mark_sit_entry_dirty(sbi, segno);
4344
4345		if (!dirtied)
4346			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4347	}
4348	update_sits_in_cursum(journal, -i);
4349	up_write(&curseg->journal_rwsem);
4350}
4351
4352/*
4353 * CP calls this function, which flushes SIT entries including sit_journal,
4354 * and moves prefree segs to free segs.
4355 */
4356void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4357{
4358	struct sit_info *sit_i = SIT_I(sbi);
4359	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4360	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4361	struct f2fs_journal *journal = curseg->journal;
4362	struct sit_entry_set *ses, *tmp;
4363	struct list_head *head = &SM_I(sbi)->sit_entry_set;
4364	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4365	struct seg_entry *se;
4366
4367	down_write(&sit_i->sentry_lock);
4368
4369	if (!sit_i->dirty_sentries)
4370		goto out;
4371
4372	/*
4373	 * add and account the sit entries marked in the dirty bitmap
4374	 * in sit entry sets temporarily
4375	 */
4376	add_sits_in_set(sbi);
4377
4378	/*
4379	 * if there is not enough space in the journal to store the dirty
4380	 * sit entries, remove all entries from the journal and account
4381	 * them in the sit entry sets instead.
4382	 */
4383	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4384								!to_journal)
4385		remove_sits_in_journal(sbi);
4386
4387	/*
4388	 * there are two steps to flush sit entries:
4389	 * #1, flush sit entries to journal in current cold data summary block.
4390	 * #2, flush sit entries to sit page.
4391	 */
4392	list_for_each_entry_safe(ses, tmp, head, set_list) {
4393		struct page *page = NULL;
4394		struct f2fs_sit_block *raw_sit = NULL;
4395		unsigned int start_segno = ses->start_segno;
4396		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4397						(unsigned long)MAIN_SEGS(sbi));
4398		unsigned int segno = start_segno;
4399
4400		if (to_journal &&
4401			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4402			to_journal = false;
4403
4404		if (to_journal) {
4405			down_write(&curseg->journal_rwsem);
4406		} else {
4407			page = get_next_sit_page(sbi, start_segno);
4408			raw_sit = page_address(page);
4409		}
4410
4411		/* flush dirty sit entries in region of current sit set */
4412		for_each_set_bit_from(segno, bitmap, end) {
4413			int offset, sit_offset;
4414
4415			se = get_seg_entry(sbi, segno);
4416#ifdef CONFIG_F2FS_CHECK_FS
4417			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4418						SIT_VBLOCK_MAP_SIZE))
4419				f2fs_bug_on(sbi, 1);
4420#endif
4421
4422			/* add discard candidates */
4423			if (!(cpc->reason & CP_DISCARD)) {
4424				cpc->trim_start = segno;
4425				add_discard_addrs(sbi, cpc, false);
4426			}
4427
4428			if (to_journal) {
4429				offset = f2fs_lookup_journal_in_cursum(journal,
4430							SIT_JOURNAL, segno, 1);
4431				f2fs_bug_on(sbi, offset < 0);
4432				segno_in_journal(journal, offset) =
4433							cpu_to_le32(segno);
4434				seg_info_to_raw_sit(se,
4435					&sit_in_journal(journal, offset));
4436				check_block_count(sbi, segno,
4437					&sit_in_journal(journal, offset));
4438			} else {
4439				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4440				seg_info_to_raw_sit(se,
4441						&raw_sit->entries[sit_offset]);
4442				check_block_count(sbi, segno,
4443						&raw_sit->entries[sit_offset]);
4444			}
4445
4446			__clear_bit(segno, bitmap);
4447			sit_i->dirty_sentries--;
4448			ses->entry_cnt--;
4449		}
4450
4451		if (to_journal)
4452			up_write(&curseg->journal_rwsem);
4453		else
4454			f2fs_put_page(page, 1);
4455
4456		f2fs_bug_on(sbi, ses->entry_cnt);
4457		release_sit_entry_set(ses);
4458	}
4459
4460	f2fs_bug_on(sbi, !list_empty(head));
4461	f2fs_bug_on(sbi, sit_i->dirty_sentries);
4462out:
4463	if (cpc->reason & CP_DISCARD) {
4464		__u64 trim_start = cpc->trim_start;
4465
4466		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4467			add_discard_addrs(sbi, cpc, false);
4468
4469		cpc->trim_start = trim_start;
4470	}
4471	up_write(&sit_i->sentry_lock);
4472
4473	set_prefree_as_free_segments(sbi);
4474}
4475
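/*
 * Editor's note on the flush above: entries go to the in-summary SIT
 * journal while __has_cursum_space() holds and the fs is not being
 * resized; once a set no longer fits, to_journal drops to false for
 * the rest of the pass and the remaining sets are written through
 * their next SIT pages instead.
 */
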
4476static int build_sit_info(struct f2fs_sb_info *sbi)
4477{
4478	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4479	struct sit_info *sit_i;
4480	unsigned int sit_segs, start;
4481	char *src_bitmap, *bitmap;
4482	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4483	unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4484
4485	/* allocate memory for SIT information */
4486	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4487	if (!sit_i)
4488		return -ENOMEM;
4489
4490	SM_I(sbi)->sit_info = sit_i;
4491
4492	sit_i->sentries =
4493		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4494					      MAIN_SEGS(sbi)),
4495			      GFP_KERNEL);
4496	if (!sit_i->sentries)
4497		return -ENOMEM;
4498
4499	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4500	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4501								GFP_KERNEL);
4502	if (!sit_i->dirty_sentries_bitmap)
4503		return -ENOMEM;
4504
4505#ifdef CONFIG_F2FS_CHECK_FS
4506	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4507#else
4508	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4509#endif
4510	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4511	if (!sit_i->bitmap)
4512		return -ENOMEM;
4513
4514	bitmap = sit_i->bitmap;
4515
4516	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4517		sit_i->sentries[start].cur_valid_map = bitmap;
4518		bitmap += SIT_VBLOCK_MAP_SIZE;
4519
4520		sit_i->sentries[start].ckpt_valid_map = bitmap;
4521		bitmap += SIT_VBLOCK_MAP_SIZE;
4522
4523#ifdef CONFIG_F2FS_CHECK_FS
4524		sit_i->sentries[start].cur_valid_map_mir = bitmap;
4525		bitmap += SIT_VBLOCK_MAP_SIZE;
4526#endif
4527
4528		if (discard_map) {
4529			sit_i->sentries[start].discard_map = bitmap;
4530			bitmap += SIT_VBLOCK_MAP_SIZE;
4531		}
4532	}
4533
4534	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4535	if (!sit_i->tmp_map)
4536		return -ENOMEM;
4537
4538	if (__is_large_section(sbi)) {
4539		sit_i->sec_entries =
4540			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4541						      MAIN_SECS(sbi)),
4542				      GFP_KERNEL);
4543		if (!sit_i->sec_entries)
4544			return -ENOMEM;
4545	}
4546
4547	/* get information related to SIT */
4548	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4549
4550	/* setup SIT bitmap from checkpoint pack */
4551	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4552	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4553
4554	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4555	if (!sit_i->sit_bitmap)
4556		return -ENOMEM;
4557
4558#ifdef CONFIG_F2FS_CHECK_FS
4559	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4560					sit_bitmap_size, GFP_KERNEL);
4561	if (!sit_i->sit_bitmap_mir)
4562		return -ENOMEM;
4563
4564	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4565					main_bitmap_size, GFP_KERNEL);
4566	if (!sit_i->invalid_segmap)
4567		return -ENOMEM;
4568#endif
4569
4570	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4571	sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs);
4572	sit_i->written_valid_blocks = 0;
4573	sit_i->bitmap_size = sit_bitmap_size;
4574	sit_i->dirty_sentries = 0;
4575	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4576	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4577	sit_i->mounted_time = ktime_get_boottime_seconds();
4578	init_rwsem(&sit_i->sentry_lock);
4579	return 0;
4580}
4581
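/*
 * Editor's sketch of the single sit_i->bitmap allocation carved up in
 * build_sit_info() above; each segment owns consecutive chunks of
 * SIT_VBLOCK_MAP_SIZE bytes:
 *
 *   [ cur_valid_map | ckpt_valid_map
 *     | cur_valid_map_mir (CONFIG_F2FS_CHECK_FS only)
 *     | discard_map (only with block-unit discard) ]
 *
 * One kvzalloc() for all segments avoids per-segment allocations.
 */
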
4582static int build_free_segmap(struct f2fs_sb_info *sbi)
4583{
4584	struct free_segmap_info *free_i;
4585	unsigned int bitmap_size, sec_bitmap_size;
4586
4587	/* allocate memory for free segmap information */
4588	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4589	if (!free_i)
4590		return -ENOMEM;
4591
4592	SM_I(sbi)->free_info = free_i;
4593
4594	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4595	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4596	if (!free_i->free_segmap)
4597		return -ENOMEM;
4598
4599	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4600	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4601	if (!free_i->free_secmap)
4602		return -ENOMEM;
4603
4604	/* set all segments as dirty temporarily */
4605	memset(free_i->free_segmap, 0xff, bitmap_size);
4606	memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4607
4608	/* init free segmap information */
4609	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4610	free_i->free_segments = 0;
4611	free_i->free_sections = 0;
4612	spin_lock_init(&free_i->segmap_lock);
4613	return 0;
4614}
4615
4616static int build_curseg(struct f2fs_sb_info *sbi)
4617{
4618	struct curseg_info *array;
4619	int i;
4620
4621	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4622					sizeof(*array)), GFP_KERNEL);
4623	if (!array)
4624		return -ENOMEM;
4625
4626	SM_I(sbi)->curseg_array = array;
4627
4628	for (i = 0; i < NO_CHECK_TYPE; i++) {
4629		mutex_init(&array[i].curseg_mutex);
4630		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4631		if (!array[i].sum_blk)
4632			return -ENOMEM;
4633		init_rwsem(&array[i].journal_rwsem);
4634		array[i].journal = f2fs_kzalloc(sbi,
4635				sizeof(struct f2fs_journal), GFP_KERNEL);
4636		if (!array[i].journal)
4637			return -ENOMEM;
4638		if (i < NR_PERSISTENT_LOG)
4639			array[i].seg_type = CURSEG_HOT_DATA + i;
4640		else if (i == CURSEG_COLD_DATA_PINNED)
4641			array[i].seg_type = CURSEG_COLD_DATA;
4642		else if (i == CURSEG_ALL_DATA_ATGC)
4643			array[i].seg_type = CURSEG_COLD_DATA;
4644		reset_curseg_fields(&array[i]);
4645	}
4646	return restore_curseg_summaries(sbi);
4647}
4648
4649static int build_sit_entries(struct f2fs_sb_info *sbi)
4650{
4651	struct sit_info *sit_i = SIT_I(sbi);
4652	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4653	struct f2fs_journal *journal = curseg->journal;
4654	struct seg_entry *se;
4655	struct f2fs_sit_entry sit;
4656	int sit_blk_cnt = SIT_BLK_CNT(sbi);
4657	unsigned int i, start, end;
4658	unsigned int readed, start_blk = 0;
4659	int err = 0;
4660	block_t sit_valid_blocks[2] = {0, 0};
4661
4662	do {
4663		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4664							META_SIT, true);
4665
4666		start = start_blk * sit_i->sents_per_block;
4667		end = (start_blk + readed) * sit_i->sents_per_block;
4668
4669		for (; start < end && start < MAIN_SEGS(sbi); start++) {
4670			struct f2fs_sit_block *sit_blk;
4671			struct page *page;
4672
4673			se = &sit_i->sentries[start];
4674			page = get_current_sit_page(sbi, start);
4675			if (IS_ERR(page))
4676				return PTR_ERR(page);
4677			sit_blk = (struct f2fs_sit_block *)page_address(page);
4678			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4679			f2fs_put_page(page, 1);
4680
4681			err = check_block_count(sbi, start, &sit);
4682			if (err)
4683				return err;
4684			seg_info_from_raw_sit(se, &sit);
4685
4686			if (se->type >= NR_PERSISTENT_LOG) {
4687				f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4688							se->type, start);
4689				f2fs_handle_error(sbi,
4690						ERROR_INCONSISTENT_SUM_TYPE);
4691				return -EFSCORRUPTED;
4692			}
4693
4694			sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4695
4696			if (!f2fs_block_unit_discard(sbi))
4697				goto init_discard_map_done;
4698
4699			/* build the discard map only once */
4700			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4701				memset(se->discard_map, 0xff,
4702						SIT_VBLOCK_MAP_SIZE);
4703				goto init_discard_map_done;
4704			}
4705			memcpy(se->discard_map, se->cur_valid_map,
4706						SIT_VBLOCK_MAP_SIZE);
4707			sbi->discard_blks += BLKS_PER_SEG(sbi) -
4708						se->valid_blocks;
4709init_discard_map_done:
4710			if (__is_large_section(sbi))
4711				get_sec_entry(sbi, start)->valid_blocks +=
4712							se->valid_blocks;
4713		}
4714		start_blk += readed;
4715	} while (start_blk < sit_blk_cnt);
4716
4717	down_read(&curseg->journal_rwsem);
4718	for (i = 0; i < sits_in_cursum(journal); i++) {
4719		unsigned int old_valid_blocks;
4720
4721		start = le32_to_cpu(segno_in_journal(journal, i));
4722		if (start >= MAIN_SEGS(sbi)) {
4723			f2fs_err(sbi, "Wrong journal entry on segno %u",
4724				 start);
4725			err = -EFSCORRUPTED;
4726			f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4727			break;
4728		}
4729
4730		se = &sit_i->sentries[start];
4731		sit = sit_in_journal(journal, i);
4732
4733		old_valid_blocks = se->valid_blocks;
4734
4735		sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks;
4736
4737		err = check_block_count(sbi, start, &sit);
4738		if (err)
4739			break;
4740		seg_info_from_raw_sit(se, &sit);
4741
4742		if (se->type >= NR_PERSISTENT_LOG) {
4743			f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4744							se->type, start);
4745			err = -EFSCORRUPTED;
4746			f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4747			break;
4748		}
4749
4750		sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks;
4751
4752		if (f2fs_block_unit_discard(sbi)) {
4753			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4754				memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4755			} else {
4756				memcpy(se->discard_map, se->cur_valid_map,
4757							SIT_VBLOCK_MAP_SIZE);
4758				sbi->discard_blks += old_valid_blocks;
4759				sbi->discard_blks -= se->valid_blocks;
4760			}
4761		}
4762
4763		if (__is_large_section(sbi)) {
4764			get_sec_entry(sbi, start)->valid_blocks +=
4765							se->valid_blocks;
4766			get_sec_entry(sbi, start)->valid_blocks -=
4767							old_valid_blocks;
4768		}
4769	}
4770	up_read(&curseg->journal_rwsem);
4771
4772	if (err)
4773		return err;
4774
4775	if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4776		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4777			 sit_valid_blocks[NODE], valid_node_count(sbi));
4778		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
4779		return -EFSCORRUPTED;
4780	}
4781
4782	if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] >
4783				valid_user_blocks(sbi)) {
4784		f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
4785			 sit_valid_blocks[DATA], sit_valid_blocks[NODE],
4786			 valid_user_blocks(sbi));
4787		f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
4788		return -EFSCORRUPTED;
4789	}
4790
4791	return 0;
4792}
4793
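/*
 * Editor's note: build_sit_entries() loads segment state in two passes:
 * a bulk read of SIT blocks (with readahead), then an overlay of the
 * SIT journal kept in the cold data summary, whose entries are newer
 * than the on-disk SIT blocks. Valid-block accounting is adjusted by
 * old vs. new counts so the final totals can be cross-checked against
 * the checkpoint's node and user block counts.
 */
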
4794static void init_free_segmap(struct f2fs_sb_info *sbi)
4795{
4796	unsigned int start;
4797	int type;
4798	struct seg_entry *sentry;
4799
4800	for (start = 0; start < MAIN_SEGS(sbi); start++) {
4801		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4802			continue;
4803		sentry = get_seg_entry(sbi, start);
4804		if (!sentry->valid_blocks)
4805			__set_free(sbi, start);
4806		else
4807			SIT_I(sbi)->written_valid_blocks +=
4808						sentry->valid_blocks;
4809	}
4810
4811	/* mark the current segments as in use */
4812	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4813		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4814
4815		__set_test_and_inuse(sbi, curseg_t->segno);
4816	}
4817}
4818
4819static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4820{
4821	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4822	struct free_segmap_info *free_i = FREE_I(sbi);
4823	unsigned int segno = 0, offset = 0, secno;
4824	block_t valid_blocks, usable_blks_in_seg;
4825
4826	while (1) {
4827		/* find dirty segment based on free segmap */
4828		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4829		if (segno >= MAIN_SEGS(sbi))
4830			break;
4831		offset = segno + 1;
4832		valid_blocks = get_valid_blocks(sbi, segno, false);
4833		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4834		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4835			continue;
4836		if (valid_blocks > usable_blks_in_seg) {
4837			f2fs_bug_on(sbi, 1);
4838			continue;
4839		}
4840		mutex_lock(&dirty_i->seglist_lock);
4841		__locate_dirty_segment(sbi, segno, DIRTY);
4842		mutex_unlock(&dirty_i->seglist_lock);
4843	}
4844
4845	if (!__is_large_section(sbi))
4846		return;
4847
4848	mutex_lock(&dirty_i->seglist_lock);
4849	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
4850		valid_blocks = get_valid_blocks(sbi, segno, true);
4851		secno = GET_SEC_FROM_SEG(sbi, segno);
4852
4853		if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
4854			continue;
4855		if (IS_CURSEC(sbi, secno))
4856			continue;
4857		set_bit(secno, dirty_i->dirty_secmap);
4858	}
4859	mutex_unlock(&dirty_i->seglist_lock);
4860}
4861
4862static int init_victim_secmap(struct f2fs_sb_info *sbi)
4863{
4864	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4865	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4866
4867	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4868	if (!dirty_i->victim_secmap)
4869		return -ENOMEM;
4870
4871	dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4872	if (!dirty_i->pinned_secmap)
4873		return -ENOMEM;
4874
4875	dirty_i->pinned_secmap_cnt = 0;
4876	dirty_i->enable_pin_section = true;
4877	return 0;
4878}
4879
4880static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4881{
4882	struct dirty_seglist_info *dirty_i;
4883	unsigned int bitmap_size, i;
4884
4885	/* allocate memory for dirty segments list information */
4886	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4887								GFP_KERNEL);
4888	if (!dirty_i)
4889		return -ENOMEM;
4890
4891	SM_I(sbi)->dirty_info = dirty_i;
4892	mutex_init(&dirty_i->seglist_lock);
4893
4894	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4895
4896	for (i = 0; i < NR_DIRTY_TYPE; i++) {
4897		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4898								GFP_KERNEL);
4899		if (!dirty_i->dirty_segmap[i])
4900			return -ENOMEM;
4901	}
4902
4903	if (__is_large_section(sbi)) {
4904		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4905		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4906						bitmap_size, GFP_KERNEL);
4907		if (!dirty_i->dirty_secmap)
4908			return -ENOMEM;
4909	}
4910
4911	init_dirty_segmap(sbi);
4912	return init_victim_secmap(sbi);
4913}
4914
4915static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4916{
4917	int i;
4918
4919	/*
4920	 * In an LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4921	 * in an LFS curseg, all blkaddrs after .next_blkoff should be unused.
4922	 */
4923	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4924		struct curseg_info *curseg = CURSEG_I(sbi, i);
4925		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4926		unsigned int blkofs = curseg->next_blkoff;
4927
4928		if (f2fs_sb_has_readonly(sbi) &&
4929			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4930			continue;
4931
4932		sanity_check_seg_type(sbi, curseg->seg_type);
4933
4934		if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
4935			f2fs_err(sbi,
4936				 "Current segment has invalid alloc_type:%d",
4937				 curseg->alloc_type);
4938			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4939			return -EFSCORRUPTED;
4940		}
4941
4942		if (f2fs_test_bit(blkofs, se->cur_valid_map))
4943			goto out;
4944
4945		if (curseg->alloc_type == SSR)
4946			continue;
4947
4948		for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) {
4949			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4950				continue;
4951out:
4952			f2fs_err(sbi,
4953				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4954				 i, curseg->segno, curseg->alloc_type,
4955				 curseg->next_blkoff, blkofs);
4956			f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4957			return -EFSCORRUPTED;
4958		}
4959	}
4960	return 0;
4961}
4962
4963#ifdef CONFIG_BLK_DEV_ZONED
4964static const char *f2fs_zone_status[BLK_ZONE_COND_OFFLINE + 1] = {
4965	[BLK_ZONE_COND_NOT_WP]		= "NOT_WP",
4966	[BLK_ZONE_COND_EMPTY]		= "EMPTY",
4967	[BLK_ZONE_COND_IMP_OPEN]	= "IMPLICIT_OPEN",
4968	[BLK_ZONE_COND_EXP_OPEN]	= "EXPLICIT_OPEN",
4969	[BLK_ZONE_COND_CLOSED]		= "CLOSED",
4970	[BLK_ZONE_COND_READONLY]	= "READONLY",
4971	[BLK_ZONE_COND_FULL]		= "FULL",
4972	[BLK_ZONE_COND_OFFLINE]		= "OFFLINE",
4973};
4974
4975static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4976				    struct f2fs_dev_info *fdev,
4977				    struct blk_zone *zone)
4978{
4979	unsigned int zone_segno;
4980	block_t zone_block, valid_block_cnt;
4981	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4982	int ret;
4983	unsigned int nofs_flags;
4984
4985	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4986		return 0;
4987
4988	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4989	zone_segno = GET_SEGNO(sbi, zone_block);
4990
4991	/*
4992	 * Skip checking the zones that cursegs point to, since
4993	 * fix_curseg_write_pointer() checks them.
4994	 */
4995	if (zone_segno >= MAIN_SEGS(sbi))
4996		return 0;
4997
4998	/*
4999	 * Get the number of valid blocks in the zone.
5000	 */
5001	valid_block_cnt = get_valid_blocks(sbi, zone_segno, true);
5002	if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
5003		f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
5004				zone_segno, valid_block_cnt,
5005				f2fs_zone_status[zone->cond]);
5006		return 0;
5007	}
5008
5009	if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
5010	    (valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
5011		return 0;
5012
5013	if (!valid_block_cnt) {
5014		f2fs_notice(sbi, "Zone without valid block has non-zero write "
5015			    "pointer. Reset the write pointer: cond[%s]",
5016			    f2fs_zone_status[zone->cond]);
5017		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
5018					zone->len >> log_sectors_per_block);
5019		if (ret)
5020			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5021				 fdev->path, ret);
5022		return ret;
5023	}
5024
5025	/*
5026	 * If there are valid blocks and the write pointer doesn't match
5027	 * them, we need to report the inconsistency and fill the zone
5028	 * till the end to close it. This inconsistency does not cause a
5029	 * write error because the zone will not be selected for a write
5030	 * operation until it gets discarded.
5031	 */
5032	f2fs_notice(sbi, "Valid blocks are not aligned with write "
5033		    "pointer: valid block[0x%x,0x%x] cond[%s]",
5034		    zone_segno, valid_block_cnt, f2fs_zone_status[zone->cond]);
5035
5036	nofs_flags = memalloc_nofs_save();
5037	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
5038				zone->start, zone->len);
5039	memalloc_nofs_restore(nofs_flags);
5040	if (ret == -EOPNOTSUPP) {
5041		ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
5042					zone->len - (zone->wp - zone->start),
5043					GFP_NOFS, 0);
5044		if (ret)
5045			f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
5046					fdev->path, ret);
5047	} else if (ret) {
5048		f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
5049				fdev->path, ret);
5050	}
5051
5052	return ret;
5053}
5054
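/*
 * Editor's summary of the zone/write-pointer cases handled above:
 *
 *   zone is not SEQWRITE_REQ             -> nothing to check
 *   zone belongs to a current section    -> report only; cursegs are
 *                                           fixed separately
 *   0 valid blocks and zone EMPTY,
 *   or >0 valid blocks and zone FULL     -> consistent, done
 *   0 valid blocks but zone not empty    -> reset the zone (discard)
 *   >0 valid blocks but zone not full    -> finish the zone (zeroout
 *                                           as the fallback)
 */
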
5055static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
5056						  block_t zone_blkaddr)
5057{
5058	int i;
5059
5060	for (i = 0; i < sbi->s_ndevs; i++) {
5061		if (!bdev_is_zoned(FDEV(i).bdev))
5062			continue;
5063		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
5064				zone_blkaddr <= FDEV(i).end_blk))
5065			return &FDEV(i);
5066	}
5067
5068	return NULL;
5069}
5070
5071static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
5072			      void *data)
5073{
5074	memcpy(data, zone, sizeof(struct blk_zone));
5075	return 0;
5076}
5077
5078static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
5079{
5080	struct curseg_info *cs = CURSEG_I(sbi, type);
5081	struct f2fs_dev_info *zbd;
5082	struct blk_zone zone;
5083	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
5084	block_t cs_zone_block, wp_block;
5085	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5086	sector_t zone_sector;
5087	int err;
5088
5089	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5090	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5091
5092	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5093	if (!zbd)
5094		return 0;
5095
5096	/* report zone for the sector the curseg points to */
5097	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5098		<< log_sectors_per_block;
5099	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5100				  report_one_zone_cb, &zone);
5101	if (err != 1) {
5102		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5103			 zbd->path, err);
5104		return err;
5105	}
5106
5107	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5108		return 0;
5109
5110	/*
5111	 * If the previous mount was safely unmounted, we can reuse the
5112	 * current segments. Otherwise, allocate new sections.
5113	 */
5114	if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
5115		wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
5116		wp_segno = GET_SEGNO(sbi, wp_block);
5117		wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5118		wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
5119
5120		if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
5121				wp_sector_off == 0)
5122			return 0;
5123
5124		f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5125			    "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
5126			    cs->next_blkoff, wp_segno, wp_blkoff);
5127	}
5128
5129	/* Allocate a new section if the current one has already been written to. */
5130	if (cs->next_blkoff) {
5131		unsigned int old_segno = cs->segno, old_blkoff = cs->next_blkoff;
5132
5133		f2fs_allocate_new_section(sbi, type, true);
5134		f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5135				"[0x%x,0x%x] -> [0x%x,0x%x]",
5136				type, old_segno, old_blkoff,
5137				cs->segno, cs->next_blkoff);
5138	}
5139
5140	/* check consistency of the zone the curseg points to */
5141	if (check_zone_write_pointer(sbi, zbd, &zone))
5142		return -EIO;
5143
5144	/* check newly assigned zone */
5145	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5146	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5147
5148	zbd = get_target_zoned_dev(sbi, cs_zone_block);
5149	if (!zbd)
5150		return 0;
5151
5152	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
5153		<< log_sectors_per_block;
5154	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
5155				  report_one_zone_cb, &zone);
5156	if (err != 1) {
5157		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5158			 zbd->path, err);
5159		return err;
5160	}
5161
5162	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
5163		return 0;
5164
5165	if (zone.wp != zone.start) {
5166		f2fs_notice(sbi,
5167			    "New zone for curseg[%d] is not yet discarded. "
5168			    "Reset the zone: curseg[0x%x,0x%x]",
5169			    type, cs->segno, cs->next_blkoff);
5170		err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
5171					zone.len >> log_sectors_per_block);
5172		if (err) {
5173			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5174				 zbd->path, err);
5175			return err;
5176		}
5177	}
5178
5179	return 0;
5180}
5181
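/*
 * Editor's note: fix_curseg_write_pointer() only trusts the on-disk
 * curseg position after a clean unmount (CP_UMOUNT_FLAG). A partially
 * written section is abandoned via f2fs_allocate_new_section(), the
 * old zone is then repaired by check_zone_write_pointer(), and the
 * newly chosen zone is discarded if its write pointer is not already
 * at the zone start.
 */
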
5182int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5183{
5184	int i, ret;
5185
5186	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
5187		ret = fix_curseg_write_pointer(sbi, i);
5188		if (ret)
5189			return ret;
5190	}
5191
5192	return 0;
5193}
5194
5195struct check_zone_write_pointer_args {
5196	struct f2fs_sb_info *sbi;
5197	struct f2fs_dev_info *fdev;
5198};
5199
5200static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
5201				      void *data)
5202{
5203	struct check_zone_write_pointer_args *args;
5204
5205	args = (struct check_zone_write_pointer_args *)data;
5206
5207	return check_zone_write_pointer(args->sbi, args->fdev, zone);
5208}
5209
5210int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5211{
5212	int i, ret;
5213	struct check_zone_write_pointer_args args;
5214
5215	for (i = 0; i < sbi->s_ndevs; i++) {
5216		if (!bdev_is_zoned(FDEV(i).bdev))
5217			continue;
5218
5219		args.sbi = sbi;
5220		args.fdev = &FDEV(i);
5221		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
5222					  check_zone_write_pointer_cb, &args);
5223		if (ret < 0)
5224			return ret;
5225	}
5226
5227	return 0;
5228}
5229
5230/*
5231 * Return the number of usable blocks in a segment. The number of blocks
5232 * returned is always equal to the number of blocks in a segment for
5233 * segments fully contained within a sequential zone capacity or a
5234 * conventional zone. For segments partially contained in a sequential
5235 * zone capacity, the number of usable blocks up to the zone capacity
5236 * is returned. 0 is returned in all other cases.
5237 */
5238static inline unsigned int f2fs_usable_zone_blks_in_seg(
5239			struct f2fs_sb_info *sbi, unsigned int segno)
5240{
5241	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5242	unsigned int secno;
5243
5244	if (!sbi->unusable_blocks_per_sec)
5245		return BLKS_PER_SEG(sbi);
5246
5247	secno = GET_SEC_FROM_SEG(sbi, segno);
5248	seg_start = START_BLOCK(sbi, segno);
5249	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5250	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5251
5252	/*
5253	 * If segment starts before zone capacity and spans beyond
5254	 * zone capacity, then usable blocks are from seg start to
5255	 * zone capacity. If the segment starts after the zone capacity,
5256	 * then there are no usable blocks.
5257	 */
5258	if (seg_start >= sec_cap_blkaddr)
5259		return 0;
5260	if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr)
5261		return sec_cap_blkaddr - seg_start;
5262
5263	return BLKS_PER_SEG(sbi);
5264}
5265#else
5266int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5267{
5268	return 0;
5269}
5270
5271int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5272{
5273	return 0;
5274}
5275
5276static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5277							unsigned int segno)
5278{
5279	return 0;
5280}
5281
5282#endif
5283unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5284					unsigned int segno)
5285{
5286	if (f2fs_sb_has_blkzoned(sbi))
5287		return f2fs_usable_zone_blks_in_seg(sbi, segno);
5288
5289	return BLKS_PER_SEG(sbi);
5290}
5291
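/*
 * Editor's worked example (illustrative numbers): assuming 4KiB blocks
 * and 2MiB segments (512 blocks), with a zoned section whose capacity
 * covers only its first 2.5 segments, f2fs_usable_zone_blks_in_seg()
 * returns 512 for segments 0 and 1, 256 for segment 2 (which straddles
 * the capacity boundary), and 0 for every later segment in the section.
 */
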
5292unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5293					unsigned int segno)
5294{
5295	if (f2fs_sb_has_blkzoned(sbi))
5296		return CAP_SEGS_PER_SEC(sbi);
5297
5298	return SEGS_PER_SEC(sbi);
5299}
5300
5301/*
5302 * Update min, max modified time for cost-benefit GC algorithm
5303 */
5304static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5305{
5306	struct sit_info *sit_i = SIT_I(sbi);
5307	unsigned int segno;
5308
5309	down_write(&sit_i->sentry_lock);
5310
5311	sit_i->min_mtime = ULLONG_MAX;
5312
5313	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
5314		unsigned int i;
5315		unsigned long long mtime = 0;
5316
5317		for (i = 0; i < SEGS_PER_SEC(sbi); i++)
5318			mtime += get_seg_entry(sbi, segno + i)->mtime;
5319
5320		mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
5321
5322		if (sit_i->min_mtime > mtime)
5323			sit_i->min_mtime = mtime;
5324	}
5325	sit_i->max_mtime = get_mtime(sbi, false);
5326	sit_i->dirty_max_mtime = 0;
5327	up_write(&sit_i->sentry_lock);
5328}
5329
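/*
 * Editor's note: a section's modification time is taken above as the
 * mean of its segments' mtimes; min_mtime is the smallest such mean
 * across all sections and max_mtime is the current clock, giving the
 * cost-benefit GC its normalized age range.
 */
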
5330int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5331{
5332	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5333	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5334	struct f2fs_sm_info *sm_info;
5335	int err;
5336
5337	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5338	if (!sm_info)
5339		return -ENOMEM;
5340
5341	/* init sm info */
5342	sbi->sm_info = sm_info;
5343	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5344	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5345	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5346	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5347	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5348	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5349	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5350	sm_info->rec_prefree_segments = sm_info->main_segments *
5351					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5352	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5353		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5354
5355	if (!f2fs_lfs_mode(sbi))
5356		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
5357	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5358	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5359	sm_info->min_seq_blocks = BLKS_PER_SEG(sbi);
5360	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5361	sm_info->min_ssr_sections = reserved_sections(sbi);
5362
5363	INIT_LIST_HEAD(&sm_info->sit_entry_set);
5364
5365	init_f2fs_rwsem(&sm_info->curseg_lock);
5366
5367	err = f2fs_create_flush_cmd_control(sbi);
5368	if (err)
5369		return err;
5370
5371	err = create_discard_cmd_control(sbi);
5372	if (err)
5373		return err;
5374
5375	err = build_sit_info(sbi);
5376	if (err)
5377		return err;
5378	err = build_free_segmap(sbi);
5379	if (err)
5380		return err;
5381	err = build_curseg(sbi);
5382	if (err)
5383		return err;
5384
5385	/* reinit free segmap based on SIT */
5386	err = build_sit_entries(sbi);
5387	if (err)
5388		return err;
5389
5390	init_free_segmap(sbi);
5391	err = build_dirty_segmap(sbi);
5392	if (err)
5393		return err;
5394
5395	err = sanity_check_curseg(sbi);
5396	if (err)
5397		return err;
5398
5399	init_min_max_mtime(sbi);
5400	return 0;
5401}
5402
5403static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5404		enum dirty_type dirty_type)
5405{
5406	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5407
5408	mutex_lock(&dirty_i->seglist_lock);
5409	kvfree(dirty_i->dirty_segmap[dirty_type]);
5410	dirty_i->nr_dirty[dirty_type] = 0;
5411	mutex_unlock(&dirty_i->seglist_lock);
5412}
5413
5414static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5415{
5416	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5417
5418	kvfree(dirty_i->pinned_secmap);
5419	kvfree(dirty_i->victim_secmap);
5420}
5421
5422static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5423{
5424	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5425	int i;
5426
5427	if (!dirty_i)
5428		return;
5429
5430	/* discard pre-free/dirty segments list */
5431	for (i = 0; i < NR_DIRTY_TYPE; i++)
5432		discard_dirty_segmap(sbi, i);
5433
5434	if (__is_large_section(sbi)) {
5435		mutex_lock(&dirty_i->seglist_lock);
5436		kvfree(dirty_i->dirty_secmap);
5437		mutex_unlock(&dirty_i->seglist_lock);
5438	}
5439
5440	destroy_victim_secmap(sbi);
5441	SM_I(sbi)->dirty_info = NULL;
5442	kfree(dirty_i);
5443}
5444
5445static void destroy_curseg(struct f2fs_sb_info *sbi)
5446{
5447	struct curseg_info *array = SM_I(sbi)->curseg_array;
5448	int i;
5449
5450	if (!array)
5451		return;
5452	SM_I(sbi)->curseg_array = NULL;
5453	for (i = 0; i < NR_CURSEG_TYPE; i++) {
5454		kfree(array[i].sum_blk);
5455		kfree(array[i].journal);
5456	}
5457	kfree(array);
5458}
5459
5460static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5461{
5462	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5463
5464	if (!free_i)
5465		return;
5466	SM_I(sbi)->free_info = NULL;
5467	kvfree(free_i->free_segmap);
5468	kvfree(free_i->free_secmap);
5469	kfree(free_i);
5470}
5471
5472static void destroy_sit_info(struct f2fs_sb_info *sbi)
5473{
5474	struct sit_info *sit_i = SIT_I(sbi);
5475
5476	if (!sit_i)
5477		return;
5478
5479	if (sit_i->sentries)
5480		kvfree(sit_i->bitmap);
5481	kfree(sit_i->tmp_map);
5482
5483	kvfree(sit_i->sentries);
5484	kvfree(sit_i->sec_entries);
5485	kvfree(sit_i->dirty_sentries_bitmap);
5486
5487	SM_I(sbi)->sit_info = NULL;
5488	kvfree(sit_i->sit_bitmap);
5489#ifdef CONFIG_F2FS_CHECK_FS
5490	kvfree(sit_i->sit_bitmap_mir);
5491	kvfree(sit_i->invalid_segmap);
5492#endif
5493	kfree(sit_i);
5494}
5495
5496void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5497{
5498	struct f2fs_sm_info *sm_info = SM_I(sbi);
5499
5500	if (!sm_info)
5501		return;
5502	f2fs_destroy_flush_cmd_control(sbi, true);
5503	destroy_discard_cmd_control(sbi);
5504	destroy_dirty_segmap(sbi);
5505	destroy_curseg(sbi);
5506	destroy_free_segmap(sbi);
5507	destroy_sit_info(sbi);
5508	sbi->sm_info = NULL;
5509	kfree(sm_info);
5510}
5511
5512int __init f2fs_create_segment_manager_caches(void)
5513{
5514	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5515			sizeof(struct discard_entry));
5516	if (!discard_entry_slab)
5517		goto fail;
5518
5519	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5520			sizeof(struct discard_cmd));
5521	if (!discard_cmd_slab)
5522		goto destroy_discard_entry;
5523
5524	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5525			sizeof(struct sit_entry_set));
5526	if (!sit_entry_set_slab)
5527		goto destroy_discard_cmd;
5528
5529	revoke_entry_slab = f2fs_kmem_cache_create("f2fs_revoke_entry",
5530			sizeof(struct revoke_entry));
5531	if (!revoke_entry_slab)
5532		goto destroy_sit_entry_set;
5533	return 0;
5534
5535destroy_sit_entry_set:
5536	kmem_cache_destroy(sit_entry_set_slab);
5537destroy_discard_cmd:
5538	kmem_cache_destroy(discard_cmd_slab);
5539destroy_discard_entry:
5540	kmem_cache_destroy(discard_entry_slab);
5541fail:
5542	return -ENOMEM;
5543}
5544
5545void f2fs_destroy_segment_manager_caches(void)
5546{
5547	kmem_cache_destroy(sit_entry_set_slab);
5548	kmem_cache_destroy(discard_cmd_slab);
5549	kmem_cache_destroy(discard_entry_slab);
5550	kmem_cache_destroy(revoke_entry_slab);
5551}