   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/node.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/fs.h>
   9#include <linux/f2fs_fs.h>
  10#include <linux/mpage.h>
  11#include <linux/backing-dev.h>
  12#include <linux/blkdev.h>
  13#include <linux/pagevec.h>
  14#include <linux/swap.h>
  15
  16#include "f2fs.h"
  17#include "node.h"
  18#include "segment.h"
  19#include "xattr.h"
  20#include <trace/events/f2fs.h>
  21
   22#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
  23
  24static struct kmem_cache *nat_entry_slab;
  25static struct kmem_cache *free_nid_slab;
  26static struct kmem_cache *nat_entry_set_slab;
  27static struct kmem_cache *fsync_node_entry_slab;
  28
  29/*
  30 * Check whether the given nid is within node id range.
  31 */
  32int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
  33{
  34	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
  35		set_sbi_flag(sbi, SBI_NEED_FSCK);
  36		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
  37			  __func__, nid);
  38		return -EFSCORRUPTED;
  39	}
  40	return 0;
  41}
  42
  43bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
  44{
  45	struct f2fs_nm_info *nm_i = NM_I(sbi);
  46	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
  47	struct sysinfo val;
  48	unsigned long avail_ram;
  49	unsigned long mem_size = 0;
  50	bool res = false;
  51
  52	if (!nm_i)
  53		return true;
  54
  55	si_meminfo(&val);
  56
  57	/* only uses low memory */
  58	avail_ram = val.totalram - val.totalhigh;
  59
   60	/*
   61	 * give 25%, 25%, 50%, 50%, 50% of low memory to each component, respectively
   62	 */
  63	if (type == FREE_NIDS) {
  64		mem_size = (nm_i->nid_cnt[FREE_NID] *
  65				sizeof(struct free_nid)) >> PAGE_SHIFT;
  66		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
  67	} else if (type == NAT_ENTRIES) {
  68		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
  69				sizeof(struct nat_entry)) >> PAGE_SHIFT;
  70		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
  71		if (excess_cached_nats(sbi))
  72			res = false;
  73	} else if (type == DIRTY_DENTS) {
  74		if (sbi->sb->s_bdi->wb.dirty_exceeded)
  75			return false;
  76		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
  77		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  78	} else if (type == INO_ENTRIES) {
  79		int i;
  80
  81		for (i = 0; i < MAX_INO_ENTRY; i++)
  82			mem_size += sbi->im[i].ino_num *
  83						sizeof(struct ino_entry);
  84		mem_size >>= PAGE_SHIFT;
  85		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  86	} else if (type == EXTENT_CACHE) {
  87		mem_size = (atomic_read(&sbi->total_ext_tree) *
  88				sizeof(struct extent_tree) +
  89				atomic_read(&sbi->total_ext_node) *
  90				sizeof(struct extent_node)) >> PAGE_SHIFT;
  91		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
  92	} else if (type == INMEM_PAGES) {
   93		/* allow at most 20% of total RAM for in-memory pages */
  94		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
  95		res = mem_size < (val.totalram / 5);
  96	} else if (type == DISCARD_CACHE) {
  97		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
  98				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
  99		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
 100	} else if (type == COMPRESS_PAGE) {
 101#ifdef CONFIG_F2FS_FS_COMPRESSION
 102		unsigned long free_ram = val.freeram;
 103
  104		/*
  105		 * if free memory is below the watermark or the cached page
  106		 * count exceeds the threshold, deny caching compressed pages.
  107		 */
 108		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
 109			(COMPRESS_MAPPING(sbi)->nrpages <
 110			 free_ram * sbi->compress_percent / 100);
 111#else
 112		res = false;
 113#endif
 114	} else {
 115		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
 116			return true;
 117	}
 118	return res;
 119}
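     /*
      * A worked example of the budgeting above (illustrative numbers):
      * with avail_ram = 1,000,000 low-memory pages and ram_thresh = 10,
      * the base budget is 1,000,000 * 10 / 100 = 100,000 pages. The 25%
      * classes (FREE_NIDS, NAT_ENTRIES) may each consume a quarter of
      * that (the ">> 2", 25,000 pages), the 50% classes half of it (the
      * ">> 1"), DISCARD_CACHE the full amount, and INMEM_PAGES is
      * special-cased to 20% of total RAM (val.totalram / 5).
      */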
 120
 121static void clear_node_page_dirty(struct page *page)
 122{
 123	if (PageDirty(page)) {
 124		f2fs_clear_page_cache_dirty_tag(page);
 125		clear_page_dirty_for_io(page);
 126		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
 127	}
 128	ClearPageUptodate(page);
 129}
 130
 131static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 132{
 133	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
 134}
 135
 136static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 137{
 138	struct page *src_page;
 139	struct page *dst_page;
 140	pgoff_t dst_off;
 141	void *src_addr;
 142	void *dst_addr;
 143	struct f2fs_nm_info *nm_i = NM_I(sbi);
 144
 145	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
 146
 147	/* get current nat block page with lock */
 148	src_page = get_current_nat_page(sbi, nid);
 149	if (IS_ERR(src_page))
 150		return src_page;
 151	dst_page = f2fs_grab_meta_page(sbi, dst_off);
 152	f2fs_bug_on(sbi, PageDirty(src_page));
 153
 154	src_addr = page_address(src_page);
 155	dst_addr = page_address(dst_page);
 156	memcpy(dst_addr, src_addr, PAGE_SIZE);
 157	set_page_dirty(dst_page);
 158	f2fs_put_page(src_page, 1);
 159
 160	set_to_next_nat(nm_i, nid);
 161
 162	return dst_page;
 163}
 164
 165static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
 166{
 167	struct nat_entry *new;
 168
 169	if (no_fail)
 170		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
 171	else
 172		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
 173	if (new) {
 174		nat_set_nid(new, nid);
 175		nat_reset_flag(new);
 176	}
 177	return new;
 178}
 179
 180static void __free_nat_entry(struct nat_entry *e)
 181{
 182	kmem_cache_free(nat_entry_slab, e);
 183}
 184
 185/* must be locked by nat_tree_lock */
 186static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
 187	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
 188{
 189	if (no_fail)
 190		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
 191	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
 192		return NULL;
 193
 194	if (raw_ne)
 195		node_info_from_raw_nat(&ne->ni, raw_ne);
 196
 197	spin_lock(&nm_i->nat_list_lock);
 198	list_add_tail(&ne->list, &nm_i->nat_entries);
 199	spin_unlock(&nm_i->nat_list_lock);
 200
 201	nm_i->nat_cnt[TOTAL_NAT]++;
 202	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
 203	return ne;
 204}
 205
 206static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 207{
 208	struct nat_entry *ne;
 209
 210	ne = radix_tree_lookup(&nm_i->nat_root, n);
 211
  212	/* for a recently accessed nat entry, move it to the tail of the LRU list */
 213	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
 214		spin_lock(&nm_i->nat_list_lock);
 215		if (!list_empty(&ne->list))
 216			list_move_tail(&ne->list, &nm_i->nat_entries);
 217		spin_unlock(&nm_i->nat_list_lock);
 218	}
 219
 220	return ne;
 221}
 222
 223static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
 224		nid_t start, unsigned int nr, struct nat_entry **ep)
 225{
 226	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
 227}
 228
 229static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
 230{
 231	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
 232	nm_i->nat_cnt[TOTAL_NAT]--;
 233	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
 234	__free_nat_entry(e);
 235}
 236
 237static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
 238							struct nat_entry *ne)
 239{
 240	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
 241	struct nat_entry_set *head;
 242
 243	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 244	if (!head) {
 245		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
 246
 247		INIT_LIST_HEAD(&head->entry_list);
 248		INIT_LIST_HEAD(&head->set_list);
 249		head->set = set;
 250		head->entry_cnt = 0;
 251		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
 252	}
 253	return head;
 254}
 255
 256static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 257						struct nat_entry *ne)
 258{
 259	struct nat_entry_set *head;
 260	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
 261
 262	if (!new_ne)
 263		head = __grab_nat_entry_set(nm_i, ne);
 264
  265	/*
  266	 * update entry_cnt when either:
  267	 * 1. a NEW_ADDR entry gets a valid block address;
  268	 * 2. an old block address is updated to a new one;
  269	 */
 270	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
 271				!get_nat_flag(ne, IS_DIRTY)))
 272		head->entry_cnt++;
 273
 274	set_nat_flag(ne, IS_PREALLOC, new_ne);
 275
 276	if (get_nat_flag(ne, IS_DIRTY))
 277		goto refresh_list;
 278
 279	nm_i->nat_cnt[DIRTY_NAT]++;
 280	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
 281	set_nat_flag(ne, IS_DIRTY, true);
 282refresh_list:
 283	spin_lock(&nm_i->nat_list_lock);
 284	if (new_ne)
 285		list_del_init(&ne->list);
 286	else
 287		list_move_tail(&ne->list, &head->entry_list);
 288	spin_unlock(&nm_i->nat_list_lock);
 289}
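     /*
      * Dirty nat entries are grouped into per-NAT-block sets (keyed by
      * NAT_BLOCK_OFFSET) so that checkpoint can flush every dirty entry
      * sharing an on-disk NAT block with one page update rather than one
      * update per entry. Entries still at NEW_ADDR are kept off the set
      * lists until they receive a real block address.
      */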
 290
 291static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 292		struct nat_entry_set *set, struct nat_entry *ne)
 293{
 294	spin_lock(&nm_i->nat_list_lock);
 295	list_move_tail(&ne->list, &nm_i->nat_entries);
 296	spin_unlock(&nm_i->nat_list_lock);
 297
 298	set_nat_flag(ne, IS_DIRTY, false);
 299	set->entry_cnt--;
 300	nm_i->nat_cnt[DIRTY_NAT]--;
 301	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
 302}
 303
 304static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
 305		nid_t start, unsigned int nr, struct nat_entry_set **ep)
 306{
 307	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
 308							start, nr);
 309}
 310
 311bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
 312{
 313	return NODE_MAPPING(sbi) == page->mapping &&
 314			IS_DNODE(page) && is_cold_node(page);
 315}
 316
 317void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
 318{
 319	spin_lock_init(&sbi->fsync_node_lock);
 320	INIT_LIST_HEAD(&sbi->fsync_node_list);
 321	sbi->fsync_seg_id = 0;
 322	sbi->fsync_node_num = 0;
 323}
 324
 325static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
 326							struct page *page)
 327{
 328	struct fsync_node_entry *fn;
 329	unsigned long flags;
 330	unsigned int seq_id;
 331
 332	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
 333
 334	get_page(page);
 335	fn->page = page;
 336	INIT_LIST_HEAD(&fn->list);
 337
 338	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 339	list_add_tail(&fn->list, &sbi->fsync_node_list);
 340	fn->seq_id = sbi->fsync_seg_id++;
 341	seq_id = fn->seq_id;
 342	sbi->fsync_node_num++;
 343	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 344
 345	return seq_id;
 346}
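     /*
      * The returned seq_id is a ticket taken under fsync_node_lock;
      * f2fs_wait_on_node_pages_writeback() later waits only for entries
      * up to this id, so an fsync is not blocked on node pages queued by
      * unrelated, later writers.
      */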
 347
 348void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
 349{
 350	struct fsync_node_entry *fn;
 351	unsigned long flags;
 352
 353	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 354	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
 355		if (fn->page == page) {
 356			list_del(&fn->list);
 357			sbi->fsync_node_num--;
 358			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 359			kmem_cache_free(fsync_node_entry_slab, fn);
 360			put_page(page);
 361			return;
 362		}
 363	}
 364	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 365	f2fs_bug_on(sbi, 1);
 366}
 367
 368void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
 369{
 370	unsigned long flags;
 371
 372	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 373	sbi->fsync_seg_id = 0;
 374	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 375}
 376
 377int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 378{
 379	struct f2fs_nm_info *nm_i = NM_I(sbi);
 380	struct nat_entry *e;
 381	bool need = false;
 382
 383	down_read(&nm_i->nat_tree_lock);
 384	e = __lookup_nat_cache(nm_i, nid);
 385	if (e) {
 386		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
 387				!get_nat_flag(e, HAS_FSYNCED_INODE))
 388			need = true;
 389	}
 390	up_read(&nm_i->nat_tree_lock);
 391	return need;
 392}
 393
 394bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 395{
 396	struct f2fs_nm_info *nm_i = NM_I(sbi);
 397	struct nat_entry *e;
 398	bool is_cp = true;
 399
 400	down_read(&nm_i->nat_tree_lock);
 401	e = __lookup_nat_cache(nm_i, nid);
 402	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
 403		is_cp = false;
 404	up_read(&nm_i->nat_tree_lock);
 405	return is_cp;
 406}
 407
 408bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 409{
 410	struct f2fs_nm_info *nm_i = NM_I(sbi);
 411	struct nat_entry *e;
 412	bool need_update = true;
 413
 414	down_read(&nm_i->nat_tree_lock);
 415	e = __lookup_nat_cache(nm_i, ino);
 416	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
 417			(get_nat_flag(e, IS_CHECKPOINTED) ||
 418			 get_nat_flag(e, HAS_FSYNCED_INODE)))
 419		need_update = false;
 420	up_read(&nm_i->nat_tree_lock);
 421	return need_update;
 422}
 423
 424/* must be locked by nat_tree_lock */
 425static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 426						struct f2fs_nat_entry *ne)
 427{
 428	struct f2fs_nm_info *nm_i = NM_I(sbi);
 429	struct nat_entry *new, *e;
 430
 431	new = __alloc_nat_entry(nid, false);
 432	if (!new)
 433		return;
 434
 435	down_write(&nm_i->nat_tree_lock);
 436	e = __lookup_nat_cache(nm_i, nid);
 437	if (!e)
 438		e = __init_nat_entry(nm_i, new, ne, false);
 439	else
 440		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
 441				nat_get_blkaddr(e) !=
 442					le32_to_cpu(ne->block_addr) ||
 443				nat_get_version(e) != ne->version);
 444	up_write(&nm_i->nat_tree_lock);
 445	if (e != new)
 446		__free_nat_entry(new);
 447}
 448
 449static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 450			block_t new_blkaddr, bool fsync_done)
 451{
 452	struct f2fs_nm_info *nm_i = NM_I(sbi);
 453	struct nat_entry *e;
 454	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
 455
 456	down_write(&nm_i->nat_tree_lock);
 457	e = __lookup_nat_cache(nm_i, ni->nid);
 458	if (!e) {
 459		e = __init_nat_entry(nm_i, new, NULL, true);
 460		copy_node_info(&e->ni, ni);
 461		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 462	} else if (new_blkaddr == NEW_ADDR) {
  463		/*
  464		 * when a nid is reallocated, the previous nat entry
  465		 * may remain in the nat cache. So, reinitialize it
  466		 * with the new information.
  467		 */
 468		copy_node_info(&e->ni, ni);
 469		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
 470	}
 471	/* let's free early to reduce memory consumption */
 472	if (e != new)
 473		__free_nat_entry(new);
 474
 475	/* sanity check */
 476	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
 477	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
 478			new_blkaddr == NULL_ADDR);
 479	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
 480			new_blkaddr == NEW_ADDR);
 481	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
 482			new_blkaddr == NEW_ADDR);
 483
  484	/* increment the version number as the node is removed */
 485	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 486		unsigned char version = nat_get_version(e);
 487
 488		nat_set_version(e, inc_node_version(version));
 489	}
 490
 491	/* change address */
 492	nat_set_blkaddr(e, new_blkaddr);
 493	if (!__is_valid_data_blkaddr(new_blkaddr))
 494		set_nat_flag(e, IS_CHECKPOINTED, false);
 495	__set_nat_cache_dirty(nm_i, e);
 496
 497	/* update fsync_mark if its inode nat entry is still alive */
 498	if (ni->nid != ni->ino)
 499		e = __lookup_nat_cache(nm_i, ni->ino);
 500	if (e) {
 501		if (fsync_done && ni->nid == ni->ino)
 502			set_nat_flag(e, HAS_FSYNCED_INODE, true);
 503		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
 504	}
 505	up_write(&nm_i->nat_tree_lock);
 506}
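     /*
      * The sanity checks above encode the address lifecycle: NULL_ADDR
      * (free) -> NEW_ADDR (allocated, not yet written) -> valid block
      * address (written) -> NULL_ADDR again on truncation, at which
      * point the version is bumped so a reallocated nid can be told
      * apart from its previous incarnation.
      */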
 507
 508int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 509{
 510	struct f2fs_nm_info *nm_i = NM_I(sbi);
 511	int nr = nr_shrink;
 512
 513	if (!down_write_trylock(&nm_i->nat_tree_lock))
 514		return 0;
 515
 516	spin_lock(&nm_i->nat_list_lock);
 517	while (nr_shrink) {
 518		struct nat_entry *ne;
 519
 520		if (list_empty(&nm_i->nat_entries))
 521			break;
 522
 523		ne = list_first_entry(&nm_i->nat_entries,
 524					struct nat_entry, list);
 525		list_del(&ne->list);
 526		spin_unlock(&nm_i->nat_list_lock);
 527
 528		__del_from_nat_cache(nm_i, ne);
 529		nr_shrink--;
 530
 531		spin_lock(&nm_i->nat_list_lock);
 532	}
 533	spin_unlock(&nm_i->nat_list_lock);
 534
 535	up_write(&nm_i->nat_tree_lock);
 536	return nr - nr_shrink;
 537}
 538
 539int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 540						struct node_info *ni)
 541{
 542	struct f2fs_nm_info *nm_i = NM_I(sbi);
 543	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 544	struct f2fs_journal *journal = curseg->journal;
 545	nid_t start_nid = START_NID(nid);
 546	struct f2fs_nat_block *nat_blk;
 547	struct page *page = NULL;
 548	struct f2fs_nat_entry ne;
 549	struct nat_entry *e;
 550	pgoff_t index;
 551	block_t blkaddr;
 552	int i;
 553
 554	ni->nid = nid;
 555
 556	/* Check nat cache */
 557	down_read(&nm_i->nat_tree_lock);
 558	e = __lookup_nat_cache(nm_i, nid);
 559	if (e) {
 560		ni->ino = nat_get_ino(e);
 561		ni->blk_addr = nat_get_blkaddr(e);
 562		ni->version = nat_get_version(e);
 563		up_read(&nm_i->nat_tree_lock);
 564		return 0;
 565	}
 566
 567	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 568
 569	/* Check current segment summary */
 570	down_read(&curseg->journal_rwsem);
 571	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
 572	if (i >= 0) {
 573		ne = nat_in_journal(journal, i);
 574		node_info_from_raw_nat(ni, &ne);
 575	}
 576	up_read(&curseg->journal_rwsem);
 577	if (i >= 0) {
 578		up_read(&nm_i->nat_tree_lock);
 579		goto cache;
 580	}
 581
 582	/* Fill node_info from nat page */
 583	index = current_nat_addr(sbi, nid);
 584	up_read(&nm_i->nat_tree_lock);
 585
 586	page = f2fs_get_meta_page(sbi, index);
 587	if (IS_ERR(page))
 588		return PTR_ERR(page);
 589
 590	nat_blk = (struct f2fs_nat_block *)page_address(page);
 591	ne = nat_blk->entries[nid - start_nid];
 592	node_info_from_raw_nat(ni, &ne);
 593	f2fs_put_page(page, 1);
 594cache:
 595	blkaddr = le32_to_cpu(ne.block_addr);
 596	if (__is_valid_data_blkaddr(blkaddr) &&
 597		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
 598		return -EFAULT;
 599
 600	/* cache nat entry */
 601	cache_nat_entry(sbi, nid, &ne);
 602	return 0;
 603}
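     /*
      * Lookup order above: the in-memory nat cache first (no I/O), then
      * the NAT journal kept in the hot-data curseg summary (entries not
      * yet written back to the NAT area), and only then the on-disk NAT
      * block. Whatever is found gets inserted into the cache for the
      * next caller.
      */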
 604
  605/*
  606 * readahead up to n node pages, clamped to the end of the block.
  607 */
 608static void f2fs_ra_node_pages(struct page *parent, int start, int n)
 609{
 610	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
 611	struct blk_plug plug;
 612	int i, end;
 613	nid_t nid;
 614
 615	blk_start_plug(&plug);
 616
  617	/* try readahead for siblings of the desired node */
 618	end = start + n;
 619	end = min(end, NIDS_PER_BLOCK);
 620	for (i = start; i < end; i++) {
 621		nid = get_nid(parent, i, false);
 622		f2fs_ra_node_page(sbi, nid);
 623	}
 624
 625	blk_finish_plug(&plug);
 626}
 627
 628pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
 629{
 630	const long direct_index = ADDRS_PER_INODE(dn->inode);
 631	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
 632	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
 633	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
 634	int cur_level = dn->cur_level;
 635	int max_level = dn->max_level;
 636	pgoff_t base = 0;
 637
 638	if (!dn->max_level)
 639		return pgofs + 1;
 640
 641	while (max_level-- > cur_level)
 642		skipped_unit *= NIDS_PER_BLOCK;
 643
 644	switch (dn->max_level) {
 645	case 3:
 646		base += 2 * indirect_blks;
 647		fallthrough;
 648	case 2:
 649		base += 2 * direct_blks;
 650		fallthrough;
 651	case 1:
 652		base += direct_index;
 653		break;
 654	default:
 655		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
 656	}
 657
 658	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
 659}
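     /*
      * Example (illustrative, assuming 4KB blocks where ADDRS_PER_BLOCK
      * and NIDS_PER_BLOCK are both about 1018): if the lookup failed at
      * the direct-node level (cur_level == max_level), one missing node
      * covers ADDRS_PER_BLOCK pages, so the next candidate offset is the
      * start of the following ADDRS_PER_BLOCK-sized unit past pgofs; one
      * level higher, the hole spans ADDRS_PER_BLOCK * NIDS_PER_BLOCK
      * pages per missing node.
      */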
 660
  661/*
  662 * The maximum depth is four.
  663 * offset[0] holds the offset within the raw inode itself.
  664 */
 665static int get_node_path(struct inode *inode, long block,
 666				int offset[4], unsigned int noffset[4])
 667{
 668	const long direct_index = ADDRS_PER_INODE(inode);
 669	const long direct_blks = ADDRS_PER_BLOCK(inode);
 670	const long dptrs_per_blk = NIDS_PER_BLOCK;
 671	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
 672	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
 673	int n = 0;
 674	int level = 0;
 675
 676	noffset[0] = 0;
 677
 678	if (block < direct_index) {
 679		offset[n] = block;
 680		goto got;
 681	}
 682	block -= direct_index;
 683	if (block < direct_blks) {
 684		offset[n++] = NODE_DIR1_BLOCK;
 685		noffset[n] = 1;
 686		offset[n] = block;
 687		level = 1;
 688		goto got;
 689	}
 690	block -= direct_blks;
 691	if (block < direct_blks) {
 692		offset[n++] = NODE_DIR2_BLOCK;
 693		noffset[n] = 2;
 694		offset[n] = block;
 695		level = 1;
 696		goto got;
 697	}
 698	block -= direct_blks;
 699	if (block < indirect_blks) {
 700		offset[n++] = NODE_IND1_BLOCK;
 701		noffset[n] = 3;
 702		offset[n++] = block / direct_blks;
 703		noffset[n] = 4 + offset[n - 1];
 704		offset[n] = block % direct_blks;
 705		level = 2;
 706		goto got;
 707	}
 708	block -= indirect_blks;
 709	if (block < indirect_blks) {
 710		offset[n++] = NODE_IND2_BLOCK;
 711		noffset[n] = 4 + dptrs_per_blk;
 712		offset[n++] = block / direct_blks;
 713		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
 714		offset[n] = block % direct_blks;
 715		level = 2;
 716		goto got;
 717	}
 718	block -= indirect_blks;
 719	if (block < dindirect_blks) {
 720		offset[n++] = NODE_DIND_BLOCK;
 721		noffset[n] = 5 + (dptrs_per_blk * 2);
 722		offset[n++] = block / indirect_blks;
 723		noffset[n] = 6 + (dptrs_per_blk * 2) +
 724			      offset[n - 1] * (dptrs_per_blk + 1);
 725		offset[n++] = (block / direct_blks) % dptrs_per_blk;
 726		noffset[n] = 7 + (dptrs_per_blk * 2) +
 727			      offset[n - 2] * (dptrs_per_blk + 1) +
 728			      offset[n - 1];
 729		offset[n] = block % direct_blks;
 730		level = 3;
 731		goto got;
 732	} else {
 733		return -E2BIG;
 734	}
 735got:
 736	return level;
 737}
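     /*
      * Worked example (illustrative constants for 4KB blocks: roughly
      * 923 in-inode data pointers, 1018 pointers per direct node): block
      * 2000 lies past the in-inode slots (2000 - 923 = 1077) and past
      * the first direct node (1077 - 1018 = 59), so the result is level
      * 1 with offset[] = { NODE_DIR2_BLOCK, 59 } and noffset[1] = 2,
      * i.e. the second node block belonging to the file.
      */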
 738
 739/*
 740 * Caller should call f2fs_put_dnode(dn).
 741 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 742 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 743 */
 744int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 745{
 746	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 747	struct page *npage[4];
 748	struct page *parent = NULL;
 749	int offset[4];
 750	unsigned int noffset[4];
 751	nid_t nids[4];
 752	int level, i = 0;
 753	int err = 0;
 754
 755	level = get_node_path(dn->inode, index, offset, noffset);
 756	if (level < 0)
 757		return level;
 758
 759	nids[0] = dn->inode->i_ino;
 760	npage[0] = dn->inode_page;
 761
 762	if (!npage[0]) {
 763		npage[0] = f2fs_get_node_page(sbi, nids[0]);
 764		if (IS_ERR(npage[0]))
 765			return PTR_ERR(npage[0]);
 766	}
 767
 768	/* if inline_data is set, should not report any block indices */
 769	if (f2fs_has_inline_data(dn->inode) && index) {
 770		err = -ENOENT;
 771		f2fs_put_page(npage[0], 1);
 772		goto release_out;
 773	}
 774
 775	parent = npage[0];
 776	if (level != 0)
 777		nids[1] = get_nid(parent, offset[0], true);
 778	dn->inode_page = npage[0];
 779	dn->inode_page_locked = true;
 780
 781	/* get indirect or direct nodes */
 782	for (i = 1; i <= level; i++) {
 783		bool done = false;
 784
 785		if (!nids[i] && mode == ALLOC_NODE) {
 786			/* alloc new node */
 787			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
 788				err = -ENOSPC;
 789				goto release_pages;
 790			}
 791
 792			dn->nid = nids[i];
 793			npage[i] = f2fs_new_node_page(dn, noffset[i]);
 794			if (IS_ERR(npage[i])) {
 795				f2fs_alloc_nid_failed(sbi, nids[i]);
 796				err = PTR_ERR(npage[i]);
 797				goto release_pages;
 798			}
 799
 800			set_nid(parent, offset[i - 1], nids[i], i == 1);
 801			f2fs_alloc_nid_done(sbi, nids[i]);
 802			done = true;
 803		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
 804			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
 805			if (IS_ERR(npage[i])) {
 806				err = PTR_ERR(npage[i]);
 807				goto release_pages;
 808			}
 809			done = true;
 810		}
 811		if (i == 1) {
 812			dn->inode_page_locked = false;
 813			unlock_page(parent);
 814		} else {
 815			f2fs_put_page(parent, 1);
 816		}
 817
 818		if (!done) {
 819			npage[i] = f2fs_get_node_page(sbi, nids[i]);
 820			if (IS_ERR(npage[i])) {
 821				err = PTR_ERR(npage[i]);
 822				f2fs_put_page(npage[0], 0);
 823				goto release_out;
 824			}
 825		}
 826		if (i < level) {
 827			parent = npage[i];
 828			nids[i + 1] = get_nid(parent, offset[i], false);
 829		}
 830	}
 831	dn->nid = nids[level];
 832	dn->ofs_in_node = offset[level];
 833	dn->node_page = npage[level];
 834	dn->data_blkaddr = f2fs_data_blkaddr(dn);
 835	return 0;
 836
 837release_pages:
 838	f2fs_put_page(parent, 1);
 839	if (i > 1)
 840		f2fs_put_page(npage[0], 0);
 841release_out:
 842	dn->inode_page = NULL;
 843	dn->node_page = NULL;
 844	if (err == -ENOENT) {
 845		dn->cur_level = i;
 846		dn->max_level = level;
 847		dn->ofs_in_node = offset[level];
 848	}
 849	return err;
 850}
 851
 852static int truncate_node(struct dnode_of_data *dn)
 853{
 854	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 855	struct node_info ni;
 856	int err;
 857	pgoff_t index;
 858
 859	err = f2fs_get_node_info(sbi, dn->nid, &ni);
 860	if (err)
 861		return err;
 862
 863	/* Deallocate node address */
 864	f2fs_invalidate_blocks(sbi, ni.blk_addr);
 865	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
 866	set_node_addr(sbi, &ni, NULL_ADDR, false);
 867
 868	if (dn->nid == dn->inode->i_ino) {
 869		f2fs_remove_orphan_inode(sbi, dn->nid);
 870		dec_valid_inode_count(sbi);
 871		f2fs_inode_synced(dn->inode);
 872	}
 873
 874	clear_node_page_dirty(dn->node_page);
 875	set_sbi_flag(sbi, SBI_IS_DIRTY);
 876
 877	index = dn->node_page->index;
 878	f2fs_put_page(dn->node_page, 1);
 879
 880	invalidate_mapping_pages(NODE_MAPPING(sbi),
 881			index, index);
 882
 883	dn->node_page = NULL;
 884	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
 885
 886	return 0;
 887}
 888
 889static int truncate_dnode(struct dnode_of_data *dn)
 890{
 891	struct page *page;
 892	int err;
 893
 894	if (dn->nid == 0)
 895		return 1;
 896
 897	/* get direct node */
 898	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
 899	if (PTR_ERR(page) == -ENOENT)
 900		return 1;
 901	else if (IS_ERR(page))
 902		return PTR_ERR(page);
 903
 904	/* Make dnode_of_data for parameter */
 905	dn->node_page = page;
 906	dn->ofs_in_node = 0;
 907	f2fs_truncate_data_blocks(dn);
 908	err = truncate_node(dn);
 909	if (err)
 910		return err;
 911
 912	return 1;
 913}
 914
 915static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 916						int ofs, int depth)
 917{
 918	struct dnode_of_data rdn = *dn;
 919	struct page *page;
 920	struct f2fs_node *rn;
 921	nid_t child_nid;
 922	unsigned int child_nofs;
 923	int freed = 0;
 924	int i, ret;
 925
 926	if (dn->nid == 0)
 927		return NIDS_PER_BLOCK + 1;
 928
 929	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
 930
 931	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
 932	if (IS_ERR(page)) {
 933		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
 934		return PTR_ERR(page);
 935	}
 936
 937	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
 938
 939	rn = F2FS_NODE(page);
 940	if (depth < 3) {
 941		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
 942			child_nid = le32_to_cpu(rn->in.nid[i]);
 943			if (child_nid == 0)
 944				continue;
 945			rdn.nid = child_nid;
 946			ret = truncate_dnode(&rdn);
 947			if (ret < 0)
 948				goto out_err;
 949			if (set_nid(page, i, 0, false))
 950				dn->node_changed = true;
 951		}
 952	} else {
 953		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
 954		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
 955			child_nid = le32_to_cpu(rn->in.nid[i]);
 956			if (child_nid == 0) {
 957				child_nofs += NIDS_PER_BLOCK + 1;
 958				continue;
 959			}
 960			rdn.nid = child_nid;
 961			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
 962			if (ret == (NIDS_PER_BLOCK + 1)) {
 963				if (set_nid(page, i, 0, false))
 964					dn->node_changed = true;
 965				child_nofs += ret;
 966			} else if (ret < 0 && ret != -ENOENT) {
 967				goto out_err;
 968			}
 969		}
 970		freed = child_nofs;
 971	}
 972
 973	if (!ofs) {
 974		/* remove current indirect node */
 975		dn->node_page = page;
 976		ret = truncate_node(dn);
 977		if (ret)
 978			goto out_err;
 979		freed++;
 980	} else {
 981		f2fs_put_page(page, 1);
 982	}
 983	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
 984	return freed;
 985
 986out_err:
 987	f2fs_put_page(page, 1);
 988	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
 989	return ret;
 990}
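     /*
      * Return convention: a positive return tells the caller how far to
      * advance the node-offset cursor (nofs). A fully freed indirect
      * subtree counts as NIDS_PER_BLOCK + 1 (its children plus the node
      * itself), truncate_dnode() contributes 1 per direct node, and
      * negative values are errors that unwind the partially handled
      * page.
      */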
 991
 992static int truncate_partial_nodes(struct dnode_of_data *dn,
 993			struct f2fs_inode *ri, int *offset, int depth)
 994{
 995	struct page *pages[2];
 996	nid_t nid[3];
 997	nid_t child_nid;
 998	int err = 0;
 999	int i;
1000	int idx = depth - 2;
1001
1002	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1003	if (!nid[0])
1004		return 0;
1005
1006	/* get indirect nodes in the path */
1007	for (i = 0; i < idx + 1; i++) {
 1008		/* the reference count will be increased */
1009		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
1010		if (IS_ERR(pages[i])) {
1011			err = PTR_ERR(pages[i]);
1012			idx = i - 1;
1013			goto fail;
1014		}
1015		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
1016	}
1017
1018	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
1019
1020	/* free direct nodes linked to a partial indirect node */
1021	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
1022		child_nid = get_nid(pages[idx], i, false);
1023		if (!child_nid)
1024			continue;
1025		dn->nid = child_nid;
1026		err = truncate_dnode(dn);
1027		if (err < 0)
1028			goto fail;
1029		if (set_nid(pages[idx], i, 0, false))
1030			dn->node_changed = true;
1031	}
1032
1033	if (offset[idx + 1] == 0) {
1034		dn->node_page = pages[idx];
1035		dn->nid = nid[idx];
1036		err = truncate_node(dn);
1037		if (err)
1038			goto fail;
1039	} else {
1040		f2fs_put_page(pages[idx], 1);
1041	}
1042	offset[idx]++;
1043	offset[idx + 1] = 0;
1044	idx--;
1045fail:
1046	for (i = idx; i >= 0; i--)
1047		f2fs_put_page(pages[i], 1);
1048
1049	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1050
1051	return err;
1052}
1053
1054/*
1055 * All the block addresses of data and nodes should be nullified.
1056 */
1057int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1058{
1059	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1060	int err = 0, cont = 1;
1061	int level, offset[4], noffset[4];
1062	unsigned int nofs = 0;
1063	struct f2fs_inode *ri;
1064	struct dnode_of_data dn;
1065	struct page *page;
1066
1067	trace_f2fs_truncate_inode_blocks_enter(inode, from);
1068
1069	level = get_node_path(inode, from, offset, noffset);
1070	if (level < 0) {
1071		trace_f2fs_truncate_inode_blocks_exit(inode, level);
1072		return level;
1073	}
1074
1075	page = f2fs_get_node_page(sbi, inode->i_ino);
1076	if (IS_ERR(page)) {
1077		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1078		return PTR_ERR(page);
1079	}
1080
1081	set_new_dnode(&dn, inode, page, NULL, 0);
1082	unlock_page(page);
1083
1084	ri = F2FS_INODE(page);
1085	switch (level) {
1086	case 0:
1087	case 1:
1088		nofs = noffset[1];
1089		break;
1090	case 2:
1091		nofs = noffset[1];
1092		if (!offset[level - 1])
1093			goto skip_partial;
1094		err = truncate_partial_nodes(&dn, ri, offset, level);
1095		if (err < 0 && err != -ENOENT)
1096			goto fail;
1097		nofs += 1 + NIDS_PER_BLOCK;
1098		break;
1099	case 3:
1100		nofs = 5 + 2 * NIDS_PER_BLOCK;
1101		if (!offset[level - 1])
1102			goto skip_partial;
1103		err = truncate_partial_nodes(&dn, ri, offset, level);
1104		if (err < 0 && err != -ENOENT)
1105			goto fail;
1106		break;
1107	default:
1108		BUG();
1109	}
1110
1111skip_partial:
1112	while (cont) {
1113		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1114		switch (offset[0]) {
1115		case NODE_DIR1_BLOCK:
1116		case NODE_DIR2_BLOCK:
1117			err = truncate_dnode(&dn);
1118			break;
1119
1120		case NODE_IND1_BLOCK:
1121		case NODE_IND2_BLOCK:
1122			err = truncate_nodes(&dn, nofs, offset[1], 2);
1123			break;
1124
1125		case NODE_DIND_BLOCK:
1126			err = truncate_nodes(&dn, nofs, offset[1], 3);
1127			cont = 0;
1128			break;
1129
1130		default:
1131			BUG();
1132		}
1133		if (err < 0 && err != -ENOENT)
1134			goto fail;
1135		if (offset[1] == 0 &&
1136				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1137			lock_page(page);
1138			BUG_ON(page->mapping != NODE_MAPPING(sbi));
1139			f2fs_wait_on_page_writeback(page, NODE, true, true);
1140			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1141			set_page_dirty(page);
1142			unlock_page(page);
1143		}
1144		offset[1] = 0;
1145		offset[0]++;
1146		nofs += err;
1147	}
1148fail:
1149	f2fs_put_page(page, 0);
1150	trace_f2fs_truncate_inode_blocks_exit(inode, err);
1151	return err > 0 ? 0 : err;
1152}
1153
1154/* caller must lock inode page */
1155int f2fs_truncate_xattr_node(struct inode *inode)
1156{
1157	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1158	nid_t nid = F2FS_I(inode)->i_xattr_nid;
1159	struct dnode_of_data dn;
1160	struct page *npage;
1161	int err;
1162
1163	if (!nid)
1164		return 0;
1165
1166	npage = f2fs_get_node_page(sbi, nid);
1167	if (IS_ERR(npage))
1168		return PTR_ERR(npage);
1169
1170	set_new_dnode(&dn, inode, NULL, npage, nid);
1171	err = truncate_node(&dn);
1172	if (err) {
1173		f2fs_put_page(npage, 1);
1174		return err;
1175	}
1176
1177	f2fs_i_xnid_write(inode, 0);
1178
1179	return 0;
1180}
1181
1182/*
1183 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1184 * f2fs_unlock_op().
1185 */
1186int f2fs_remove_inode_page(struct inode *inode)
1187{
1188	struct dnode_of_data dn;
1189	int err;
1190
1191	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1192	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1193	if (err)
1194		return err;
1195
1196	err = f2fs_truncate_xattr_node(inode);
1197	if (err) {
1198		f2fs_put_dnode(&dn);
1199		return err;
1200	}
1201
1202	/* remove potential inline_data blocks */
1203	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1204				S_ISLNK(inode->i_mode))
1205		f2fs_truncate_data_blocks_range(&dn, 1);
1206
1207	/* 0 is possible, after f2fs_new_inode() has failed */
1208	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1209		f2fs_put_dnode(&dn);
1210		return -EIO;
1211	}
1212
1213	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1214		f2fs_warn(F2FS_I_SB(inode),
1215			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
1216			inode->i_ino, (unsigned long long)inode->i_blocks);
1217		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1218	}
1219
1220	/* will put inode & node pages */
1221	err = truncate_node(&dn);
1222	if (err) {
1223		f2fs_put_dnode(&dn);
1224		return err;
1225	}
1226	return 0;
1227}
1228
1229struct page *f2fs_new_inode_page(struct inode *inode)
1230{
1231	struct dnode_of_data dn;
1232
1233	/* allocate inode page for new inode */
1234	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1235
1236	/* caller should f2fs_put_page(page, 1); */
1237	return f2fs_new_node_page(&dn, 0);
1238}
1239
1240struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1241{
1242	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1243	struct node_info new_ni;
1244	struct page *page;
1245	int err;
1246
1247	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1248		return ERR_PTR(-EPERM);
1249
1250	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1251	if (!page)
1252		return ERR_PTR(-ENOMEM);
1253
1254	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1255		goto fail;
1256
1257#ifdef CONFIG_F2FS_CHECK_FS
1258	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
1259	if (err) {
1260		dec_valid_node_count(sbi, dn->inode, !ofs);
1261		goto fail;
1262	}
1263	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
1264#endif
1265	new_ni.nid = dn->nid;
1266	new_ni.ino = dn->inode->i_ino;
1267	new_ni.blk_addr = NULL_ADDR;
1268	new_ni.flag = 0;
1269	new_ni.version = 0;
1270	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1271
1272	f2fs_wait_on_page_writeback(page, NODE, true, true);
1273	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1274	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1275	if (!PageUptodate(page))
1276		SetPageUptodate(page);
1277	if (set_page_dirty(page))
1278		dn->node_changed = true;
1279
1280	if (f2fs_has_xattr_block(ofs))
1281		f2fs_i_xnid_write(dn->inode, dn->nid);
1282
1283	if (ofs == 0)
1284		inc_valid_inode_count(sbi);
1285	return page;
1286
1287fail:
1288	clear_node_page_dirty(page);
1289	f2fs_put_page(page, 1);
1290	return ERR_PTR(err);
1291}
1292
 1293/*
 1294 * Callers should release the page according to the return value:
 1295 * 0: f2fs_put_page(page, 0)
 1296 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 1297 */
1298static int read_node_page(struct page *page, int op_flags)
1299{
1300	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1301	struct node_info ni;
1302	struct f2fs_io_info fio = {
1303		.sbi = sbi,
1304		.type = NODE,
1305		.op = REQ_OP_READ,
1306		.op_flags = op_flags,
1307		.page = page,
1308		.encrypted_page = NULL,
1309	};
1310	int err;
1311
1312	if (PageUptodate(page)) {
1313		if (!f2fs_inode_chksum_verify(sbi, page)) {
1314			ClearPageUptodate(page);
1315			return -EFSBADCRC;
1316		}
1317		return LOCKED_PAGE;
1318	}
1319
1320	err = f2fs_get_node_info(sbi, page->index, &ni);
1321	if (err)
1322		return err;
1323
 1324	/* NEW_ADDR can be seen after cp_error drops some dirty node pages */
1325	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
1326			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
1327		ClearPageUptodate(page);
1328		return -ENOENT;
1329	}
1330
1331	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1332
1333	err = f2fs_submit_page_bio(&fio);
1334
1335	if (!err)
1336		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
1337
1338	return err;
1339}
1340
1341/*
1342 * Readahead a node page
1343 */
1344void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1345{
1346	struct page *apage;
1347	int err;
1348
1349	if (!nid)
1350		return;
1351	if (f2fs_check_nid_range(sbi, nid))
1352		return;
1353
1354	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1355	if (apage)
1356		return;
1357
1358	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1359	if (!apage)
1360		return;
1361
1362	err = read_node_page(apage, REQ_RAHEAD);
1363	f2fs_put_page(apage, err ? 1 : 0);
1364}
1365
1366static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1367					struct page *parent, int start)
1368{
1369	struct page *page;
1370	int err;
1371
1372	if (!nid)
1373		return ERR_PTR(-ENOENT);
1374	if (f2fs_check_nid_range(sbi, nid))
1375		return ERR_PTR(-EINVAL);
1376repeat:
1377	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1378	if (!page)
1379		return ERR_PTR(-ENOMEM);
1380
1381	err = read_node_page(page, 0);
1382	if (err < 0) {
1383		f2fs_put_page(page, 1);
1384		return ERR_PTR(err);
1385	} else if (err == LOCKED_PAGE) {
1386		err = 0;
1387		goto page_hit;
1388	}
1389
1390	if (parent)
1391		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1392
1393	lock_page(page);
1394
1395	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1396		f2fs_put_page(page, 1);
1397		goto repeat;
1398	}
1399
1400	if (unlikely(!PageUptodate(page))) {
1401		err = -EIO;
1402		goto out_err;
1403	}
1404
1405	if (!f2fs_inode_chksum_verify(sbi, page)) {
1406		err = -EFSBADCRC;
1407		goto out_err;
1408	}
1409page_hit:
1410	if (unlikely(nid != nid_of_node(page))) {
1411		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1412			  nid, nid_of_node(page), ino_of_node(page),
1413			  ofs_of_node(page), cpver_of_node(page),
1414			  next_blkaddr_of_node(page));
1415		err = -EINVAL;
1416out_err:
1417		ClearPageUptodate(page);
1418		f2fs_put_page(page, 1);
1419		return ERR_PTR(err);
1420	}
1421	return page;
1422}
1423
1424struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1425{
1426	return __get_node_page(sbi, nid, NULL, 0);
1427}
1428
1429struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1430{
1431	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1432	nid_t nid = get_nid(parent, start, false);
1433
1434	return __get_node_page(sbi, nid, parent, start);
1435}
1436
1437static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1438{
1439	struct inode *inode;
1440	struct page *page;
1441	int ret;
1442
1443	/* should flush inline_data before evict_inode */
1444	inode = ilookup(sbi->sb, ino);
1445	if (!inode)
1446		return;
1447
1448	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1449					FGP_LOCK|FGP_NOWAIT, 0);
1450	if (!page)
1451		goto iput_out;
1452
1453	if (!PageUptodate(page))
1454		goto page_out;
1455
1456	if (!PageDirty(page))
1457		goto page_out;
1458
1459	if (!clear_page_dirty_for_io(page))
1460		goto page_out;
1461
1462	ret = f2fs_write_inline_data(inode, page);
1463	inode_dec_dirty_pages(inode);
1464	f2fs_remove_dirty_inode(inode);
1465	if (ret)
1466		set_page_dirty(page);
1467page_out:
1468	f2fs_put_page(page, 1);
1469iput_out:
1470	iput(inode);
1471}
1472
1473static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1474{
1475	pgoff_t index;
1476	struct pagevec pvec;
1477	struct page *last_page = NULL;
1478	int nr_pages;
1479
1480	pagevec_init(&pvec);
1481	index = 0;
1482
1483	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1484				PAGECACHE_TAG_DIRTY))) {
1485		int i;
1486
1487		for (i = 0; i < nr_pages; i++) {
1488			struct page *page = pvec.pages[i];
1489
1490			if (unlikely(f2fs_cp_error(sbi))) {
1491				f2fs_put_page(last_page, 0);
1492				pagevec_release(&pvec);
1493				return ERR_PTR(-EIO);
1494			}
1495
1496			if (!IS_DNODE(page) || !is_cold_node(page))
1497				continue;
1498			if (ino_of_node(page) != ino)
1499				continue;
1500
1501			lock_page(page);
1502
1503			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1504continue_unlock:
1505				unlock_page(page);
1506				continue;
1507			}
1508			if (ino_of_node(page) != ino)
1509				goto continue_unlock;
1510
1511			if (!PageDirty(page)) {
1512				/* someone wrote it for us */
1513				goto continue_unlock;
1514			}
1515
1516			if (last_page)
1517				f2fs_put_page(last_page, 0);
1518
1519			get_page(page);
1520			last_page = page;
1521			unlock_page(page);
1522		}
1523		pagevec_release(&pvec);
1524		cond_resched();
1525	}
1526	return last_page;
1527}
1528
1529static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1530				struct writeback_control *wbc, bool do_balance,
1531				enum iostat_type io_type, unsigned int *seq_id)
1532{
1533	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1534	nid_t nid;
1535	struct node_info ni;
1536	struct f2fs_io_info fio = {
1537		.sbi = sbi,
1538		.ino = ino_of_node(page),
1539		.type = NODE,
1540		.op = REQ_OP_WRITE,
1541		.op_flags = wbc_to_write_flags(wbc),
1542		.page = page,
1543		.encrypted_page = NULL,
1544		.submitted = false,
1545		.io_type = io_type,
1546		.io_wbc = wbc,
1547	};
1548	unsigned int seq;
1549
1550	trace_f2fs_writepage(page, NODE);
1551
1552	if (unlikely(f2fs_cp_error(sbi))) {
1553		ClearPageUptodate(page);
1554		dec_page_count(sbi, F2FS_DIRTY_NODES);
1555		unlock_page(page);
1556		return 0;
1557	}
1558
1559	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1560		goto redirty_out;
1561
1562	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1563			wbc->sync_mode == WB_SYNC_NONE &&
1564			IS_DNODE(page) && is_cold_node(page))
1565		goto redirty_out;
1566
1567	/* get old block addr of this node page */
1568	nid = nid_of_node(page);
1569	f2fs_bug_on(sbi, page->index != nid);
1570
1571	if (f2fs_get_node_info(sbi, nid, &ni))
1572		goto redirty_out;
1573
1574	if (wbc->for_reclaim) {
1575		if (!down_read_trylock(&sbi->node_write))
1576			goto redirty_out;
1577	} else {
1578		down_read(&sbi->node_write);
1579	}
1580
1581	/* This page is already truncated */
1582	if (unlikely(ni.blk_addr == NULL_ADDR)) {
1583		ClearPageUptodate(page);
1584		dec_page_count(sbi, F2FS_DIRTY_NODES);
1585		up_read(&sbi->node_write);
1586		unlock_page(page);
1587		return 0;
1588	}
1589
1590	if (__is_valid_data_blkaddr(ni.blk_addr) &&
1591		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1592					DATA_GENERIC_ENHANCE)) {
1593		up_read(&sbi->node_write);
1594		goto redirty_out;
1595	}
1596
1597	if (atomic && !test_opt(sbi, NOBARRIER))
1598		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1599
1600	/* should add to global list before clearing PAGECACHE status */
1601	if (f2fs_in_warm_node_list(sbi, page)) {
1602		seq = f2fs_add_fsync_node_entry(sbi, page);
1603		if (seq_id)
1604			*seq_id = seq;
1605	}
1606
1607	set_page_writeback(page);
1608	ClearPageError(page);
1609
1610	fio.old_blkaddr = ni.blk_addr;
1611	f2fs_do_write_node_page(nid, &fio);
1612	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1613	dec_page_count(sbi, F2FS_DIRTY_NODES);
1614	up_read(&sbi->node_write);
1615
1616	if (wbc->for_reclaim) {
1617		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1618		submitted = NULL;
1619	}
1620
1621	unlock_page(page);
1622
1623	if (unlikely(f2fs_cp_error(sbi))) {
1624		f2fs_submit_merged_write(sbi, NODE);
1625		submitted = NULL;
1626	}
1627	if (submitted)
1628		*submitted = fio.submitted;
1629
1630	if (do_balance)
1631		f2fs_balance_fs(sbi, false);
1632	return 0;
1633
1634redirty_out:
1635	redirty_page_for_writepage(wbc, page);
1636	return AOP_WRITEPAGE_ACTIVATE;
1637}
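     /*
      * For atomic (fsync-critical) writes the bio is tagged REQ_PREFLUSH
      * | REQ_FUA unless the filesystem is mounted with "nobarrier", so
      * the dnode reaches stable media before fsync returns. Warm dnodes
      * are also registered on the fsync list here, and the resulting
      * seq_id lets f2fs_wait_on_node_pages_writeback() wait for
      * precisely them.
      */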
1638
1639int f2fs_move_node_page(struct page *node_page, int gc_type)
1640{
1641	int err = 0;
1642
1643	if (gc_type == FG_GC) {
1644		struct writeback_control wbc = {
1645			.sync_mode = WB_SYNC_ALL,
1646			.nr_to_write = 1,
1647			.for_reclaim = 0,
1648		};
1649
1650		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1651
1652		set_page_dirty(node_page);
1653
1654		if (!clear_page_dirty_for_io(node_page)) {
1655			err = -EAGAIN;
1656			goto out_page;
1657		}
1658
1659		if (__write_node_page(node_page, false, NULL,
1660					&wbc, false, FS_GC_NODE_IO, NULL)) {
1661			err = -EAGAIN;
1662			unlock_page(node_page);
1663		}
1664		goto release_page;
1665	} else {
1666		/* set page dirty and write it */
1667		if (!PageWriteback(node_page))
1668			set_page_dirty(node_page);
1669	}
1670out_page:
1671	unlock_page(node_page);
1672release_page:
1673	f2fs_put_page(node_page, 0);
1674	return err;
1675}
1676
1677static int f2fs_write_node_page(struct page *page,
1678				struct writeback_control *wbc)
1679{
1680	return __write_node_page(page, false, NULL, wbc, false,
1681						FS_NODE_IO, NULL);
1682}
1683
1684int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1685			struct writeback_control *wbc, bool atomic,
1686			unsigned int *seq_id)
1687{
1688	pgoff_t index;
1689	struct pagevec pvec;
1690	int ret = 0;
1691	struct page *last_page = NULL;
1692	bool marked = false;
1693	nid_t ino = inode->i_ino;
1694	int nr_pages;
1695	int nwritten = 0;
1696
1697	if (atomic) {
1698		last_page = last_fsync_dnode(sbi, ino);
1699		if (IS_ERR_OR_NULL(last_page))
1700			return PTR_ERR_OR_ZERO(last_page);
1701	}
1702retry:
1703	pagevec_init(&pvec);
1704	index = 0;
1705
1706	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1707				PAGECACHE_TAG_DIRTY))) {
1708		int i;
1709
1710		for (i = 0; i < nr_pages; i++) {
1711			struct page *page = pvec.pages[i];
1712			bool submitted = false;
1713
1714			if (unlikely(f2fs_cp_error(sbi))) {
1715				f2fs_put_page(last_page, 0);
1716				pagevec_release(&pvec);
1717				ret = -EIO;
1718				goto out;
1719			}
1720
1721			if (!IS_DNODE(page) || !is_cold_node(page))
1722				continue;
1723			if (ino_of_node(page) != ino)
1724				continue;
1725
1726			lock_page(page);
1727
1728			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1729continue_unlock:
1730				unlock_page(page);
1731				continue;
1732			}
1733			if (ino_of_node(page) != ino)
1734				goto continue_unlock;
1735
1736			if (!PageDirty(page) && page != last_page) {
1737				/* someone wrote it for us */
1738				goto continue_unlock;
1739			}
1740
1741			f2fs_wait_on_page_writeback(page, NODE, true, true);
1742
1743			set_fsync_mark(page, 0);
1744			set_dentry_mark(page, 0);
1745
1746			if (!atomic || page == last_page) {
1747				set_fsync_mark(page, 1);
1748				if (IS_INODE(page)) {
1749					if (is_inode_flag_set(inode,
1750								FI_DIRTY_INODE))
1751						f2fs_update_inode(inode, page);
1752					set_dentry_mark(page,
1753						f2fs_need_dentry_mark(sbi, ino));
1754				}
 1755				/* may be written by another thread */
1756				if (!PageDirty(page))
1757					set_page_dirty(page);
1758			}
1759
1760			if (!clear_page_dirty_for_io(page))
1761				goto continue_unlock;
1762
1763			ret = __write_node_page(page, atomic &&
1764						page == last_page,
1765						&submitted, wbc, true,
1766						FS_NODE_IO, seq_id);
1767			if (ret) {
1768				unlock_page(page);
1769				f2fs_put_page(last_page, 0);
1770				break;
1771			} else if (submitted) {
1772				nwritten++;
1773			}
1774
1775			if (page == last_page) {
1776				f2fs_put_page(page, 0);
1777				marked = true;
1778				break;
1779			}
1780		}
1781		pagevec_release(&pvec);
1782		cond_resched();
1783
1784		if (ret || marked)
1785			break;
1786	}
1787	if (!ret && atomic && !marked) {
1788		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1789			   ino, last_page->index);
1790		lock_page(last_page);
1791		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1792		set_page_dirty(last_page);
1793		unlock_page(last_page);
1794		goto retry;
1795	}
1796out:
1797	if (nwritten)
1798		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1799	return ret ? -EIO : 0;
1800}
1801
1802static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1803{
1804	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1805	bool clean;
1806
1807	if (inode->i_ino != ino)
1808		return 0;
1809
1810	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1811		return 0;
1812
1813	spin_lock(&sbi->inode_lock[DIRTY_META]);
1814	clean = list_empty(&F2FS_I(inode)->gdirty_list);
1815	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1816
1817	if (clean)
1818		return 0;
1819
1820	inode = igrab(inode);
1821	if (!inode)
1822		return 0;
1823	return 1;
1824}
1825
1826static bool flush_dirty_inode(struct page *page)
1827{
1828	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1829	struct inode *inode;
1830	nid_t ino = ino_of_node(page);
1831
1832	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1833	if (!inode)
1834		return false;
1835
1836	f2fs_update_inode(inode, page);
1837	unlock_page(page);
1838
1839	iput(inode);
1840	return true;
1841}
1842
1843void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1844{
1845	pgoff_t index = 0;
1846	struct pagevec pvec;
1847	int nr_pages;
1848
1849	pagevec_init(&pvec);
1850
1851	while ((nr_pages = pagevec_lookup_tag(&pvec,
1852			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1853		int i;
1854
1855		for (i = 0; i < nr_pages; i++) {
1856			struct page *page = pvec.pages[i];
1857
1858			if (!IS_DNODE(page))
1859				continue;
1860
1861			lock_page(page);
1862
1863			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1864continue_unlock:
1865				unlock_page(page);
1866				continue;
1867			}
1868
1869			if (!PageDirty(page)) {
1870				/* someone wrote it for us */
1871				goto continue_unlock;
1872			}
1873
 1874			/* flush inline_data if this is an async context. */
1875			if (page_private_inline(page)) {
1876				clear_page_private_inline(page);
1877				unlock_page(page);
1878				flush_inline_data(sbi, ino_of_node(page));
1879				continue;
1880			}
1881			unlock_page(page);
1882		}
1883		pagevec_release(&pvec);
1884		cond_resched();
1885	}
1886}
1887
1888int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1889				struct writeback_control *wbc,
1890				bool do_balance, enum iostat_type io_type)
1891{
1892	pgoff_t index;
1893	struct pagevec pvec;
1894	int step = 0;
1895	int nwritten = 0;
1896	int ret = 0;
1897	int nr_pages, done = 0;
1898
1899	pagevec_init(&pvec);
1900
1901next_step:
1902	index = 0;
1903
1904	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1905			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1906		int i;
1907
1908		for (i = 0; i < nr_pages; i++) {
1909			struct page *page = pvec.pages[i];
1910			bool submitted = false;
1911			bool may_dirty = true;
1912
 1913			/* give priority to WB_SYNC threads */
1914			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1915					wbc->sync_mode == WB_SYNC_NONE) {
1916				done = 1;
1917				break;
1918			}
1919
1920			/*
1921			 * flushing sequence with step:
1922			 * 0. indirect nodes
1923			 * 1. dentry dnodes
1924			 * 2. file dnodes
1925			 */
1926			if (step == 0 && IS_DNODE(page))
1927				continue;
1928			if (step == 1 && (!IS_DNODE(page) ||
1929						is_cold_node(page)))
1930				continue;
1931			if (step == 2 && (!IS_DNODE(page) ||
1932						!is_cold_node(page)))
1933				continue;
1934lock_node:
1935			if (wbc->sync_mode == WB_SYNC_ALL)
1936				lock_page(page);
1937			else if (!trylock_page(page))
1938				continue;
1939
1940			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1941continue_unlock:
1942				unlock_page(page);
1943				continue;
1944			}
1945
1946			if (!PageDirty(page)) {
1947				/* someone wrote it for us */
1948				goto continue_unlock;
1949			}
1950
 1951			/* flush inline_data/inode if this is an async context. */
1952			if (!do_balance)
1953				goto write_node;
1954
1955			/* flush inline_data */
1956			if (page_private_inline(page)) {
1957				clear_page_private_inline(page);
1958				unlock_page(page);
1959				flush_inline_data(sbi, ino_of_node(page));
1960				goto lock_node;
1961			}
1962
1963			/* flush dirty inode */
1964			if (IS_INODE(page) && may_dirty) {
1965				may_dirty = false;
1966				if (flush_dirty_inode(page))
1967					goto lock_node;
1968			}
1969write_node:
1970			f2fs_wait_on_page_writeback(page, NODE, true, true);
1971
1972			if (!clear_page_dirty_for_io(page))
1973				goto continue_unlock;
1974
1975			set_fsync_mark(page, 0);
1976			set_dentry_mark(page, 0);
1977
1978			ret = __write_node_page(page, false, &submitted,
1979						wbc, do_balance, io_type, NULL);
1980			if (ret)
1981				unlock_page(page);
1982			else if (submitted)
1983				nwritten++;
1984
1985			if (--wbc->nr_to_write == 0)
1986				break;
1987		}
1988		pagevec_release(&pvec);
1989		cond_resched();
1990
1991		if (wbc->nr_to_write == 0) {
1992			step = 2;
1993			break;
1994		}
1995	}
1996
1997	if (step < 2) {
1998		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1999				wbc->sync_mode == WB_SYNC_NONE && step == 1)
2000			goto out;
2001		step++;
2002		goto next_step;
2003	}
2004out:
2005	if (nwritten)
2006		f2fs_submit_merged_write(sbi, NODE);
2007
2008	if (unlikely(f2fs_cp_error(sbi)))
2009		return -EIO;
2010	return ret;
2011}
2012
2013int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2014						unsigned int seq_id)
2015{
2016	struct fsync_node_entry *fn;
2017	struct page *page;
2018	struct list_head *head = &sbi->fsync_node_list;
2019	unsigned long flags;
2020	unsigned int cur_seq_id = 0;
2021	int ret2, ret = 0;
2022
2023	while (seq_id && cur_seq_id < seq_id) {
2024		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2025		if (list_empty(head)) {
2026			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2027			break;
2028		}
2029		fn = list_first_entry(head, struct fsync_node_entry, list);
2030		if (fn->seq_id > seq_id) {
2031			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2032			break;
2033		}
2034		cur_seq_id = fn->seq_id;
2035		page = fn->page;
2036		get_page(page);
2037		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2038
2039		f2fs_wait_on_page_writeback(page, NODE, true, false);
2040		if (TestClearPageError(page))
2041			ret = -EIO;
2042
2043		put_page(page);
2044
2045		if (ret)
2046			break;
2047	}
2048
2049	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
2050	if (!ret)
2051		ret = ret2;
2052
2053	return ret;
2054}
2055
2056static int f2fs_write_node_pages(struct address_space *mapping,
2057			    struct writeback_control *wbc)
2058{
2059	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2060	struct blk_plug plug;
2061	long diff;
2062
2063	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2064		goto skip_write;
2065
2066	/* balancing f2fs's metadata in background */
2067	f2fs_balance_fs_bg(sbi, true);
2068
2069	/* collect a number of dirty node pages and write together */
2070	if (wbc->sync_mode != WB_SYNC_ALL &&
2071			get_pages(sbi, F2FS_DIRTY_NODES) <
2072					nr_pages_to_skip(sbi, NODE))
2073		goto skip_write;
2074
2075	if (wbc->sync_mode == WB_SYNC_ALL)
2076		atomic_inc(&sbi->wb_sync_req[NODE]);
2077	else if (atomic_read(&sbi->wb_sync_req[NODE]))
2078		goto skip_write;
2079
2080	trace_f2fs_writepages(mapping->host, wbc, NODE);
2081
2082	diff = nr_pages_to_write(sbi, NODE, wbc);
2083	blk_start_plug(&plug);
2084	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2085	blk_finish_plug(&plug);
2086	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2087
2088	if (wbc->sync_mode == WB_SYNC_ALL)
2089		atomic_dec(&sbi->wb_sync_req[NODE]);
2090	return 0;
2091
2092skip_write:
2093	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2094	trace_f2fs_writepages(mapping->host, wbc, NODE);
2095	return 0;
2096}
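
/*
 * Throttling scheme used above: a WB_SYNC_ALL writer holds
 * wb_sync_req[NODE] elevated for its whole run, and any concurrent
 * WB_SYNC_NONE writeback sees the non-zero counter and bails out, so
 * background flushes never compete with an in-flight sync of node pages.
 */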
2097
2098static int f2fs_set_node_page_dirty(struct page *page)
2099{
2100	trace_f2fs_set_page_dirty(page, NODE);
2101
2102	if (!PageUptodate(page))
2103		SetPageUptodate(page);
2104#ifdef CONFIG_F2FS_CHECK_FS
2105	if (IS_INODE(page))
2106		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
2107#endif
2108	if (!PageDirty(page)) {
2109		__set_page_dirty_nobuffers(page);
2110		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
2111		set_page_private_reference(page);
2112		return 1;
2113	}
2114	return 0;
2115}
2116
2117/*
2118 * Structure of the f2fs node operations
2119 */
2120const struct address_space_operations f2fs_node_aops = {
2121	.writepage	= f2fs_write_node_page,
2122	.writepages	= f2fs_write_node_pages,
2123	.set_page_dirty	= f2fs_set_node_page_dirty,
2124	.invalidatepage	= f2fs_invalidate_page,
2125	.releasepage	= f2fs_release_page,
2126#ifdef CONFIG_MIGRATION
2127	.migratepage	= f2fs_migrate_page,
2128#endif
2129};
2130
2131static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2132						nid_t n)
2133{
2134	return radix_tree_lookup(&nm_i->free_nid_root, n);
2135}
2136
2137static int __insert_free_nid(struct f2fs_sb_info *sbi,
2138				struct free_nid *i)
2139{
2140	struct f2fs_nm_info *nm_i = NM_I(sbi);
2141	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2142
2143	if (err)
2144		return err;
2145
2146	nm_i->nid_cnt[FREE_NID]++;
2147	list_add_tail(&i->list, &nm_i->free_nid_list);
2148	return 0;
2149}
2150
2151static void __remove_free_nid(struct f2fs_sb_info *sbi,
2152			struct free_nid *i, enum nid_state state)
2153{
2154	struct f2fs_nm_info *nm_i = NM_I(sbi);
2155
2156	f2fs_bug_on(sbi, state != i->state);
2157	nm_i->nid_cnt[state]--;
2158	if (state == FREE_NID)
2159		list_del(&i->list);
2160	radix_tree_delete(&nm_i->free_nid_root, i->nid);
2161}
2162
2163static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2164			enum nid_state org_state, enum nid_state dst_state)
2165{
2166	struct f2fs_nm_info *nm_i = NM_I(sbi);
2167
2168	f2fs_bug_on(sbi, org_state != i->state);
2169	i->state = dst_state;
2170	nm_i->nid_cnt[org_state]--;
2171	nm_i->nid_cnt[dst_state]++;
2172
2173	switch (dst_state) {
2174	case PREALLOC_NID:
2175		list_del(&i->list);
2176		break;
2177	case FREE_NID:
2178		list_add_tail(&i->list, &nm_i->free_nid_list);
2179		break;
2180	default:
2181		BUG_ON(1);
2182	}
2183}
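
/*
 * Free nid lifecycle implied by the helpers above (a sketch):
 *
 *	add_free_nid()          -> FREE_NID (linked on free_nid_list)
 *	f2fs_alloc_nid()        -> PREALLOC_NID (unlinked from the list)
 *	f2fs_alloc_nid_done()   -> entry removed and freed
 *	f2fs_alloc_nid_failed() -> back to FREE_NID, or freed outright
 *	                           under memory pressure
 *
 * Entries in either state stay indexed in free_nid_root.
 */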
2184
2185static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2186							bool set, bool build)
2187{
2188	struct f2fs_nm_info *nm_i = NM_I(sbi);
2189	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2190	unsigned int nid_ofs = nid - START_NID(nid);
2191
2192	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2193		return;
2194
2195	if (set) {
2196		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2197			return;
2198		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2199		nm_i->free_nid_count[nat_ofs]++;
2200	} else {
2201		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2202			return;
2203		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2204		if (!build)
2205			nm_i->free_nid_count[nat_ofs]--;
2206	}
2207}
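
/*
 * Index math used above, as a worked example (assuming the default 4KB
 * block size, i.e. NAT_ENTRY_PER_BLOCK == 455): nid 1000 gives
 * nat_ofs = 1000 / 455 = 2 and nid_ofs = 1000 - START_NID(1000)
 * = 1000 - 910 = 90, so it is tracked as bit 90 of free_nid_bitmap[2].
 */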
2208
 2209/* return whether the nid is recognized as free */
2210static bool add_free_nid(struct f2fs_sb_info *sbi,
2211				nid_t nid, bool build, bool update)
2212{
2213	struct f2fs_nm_info *nm_i = NM_I(sbi);
2214	struct free_nid *i, *e;
2215	struct nat_entry *ne;
2216	int err = -EINVAL;
2217	bool ret = false;
2218
2219	/* 0 nid should not be used */
2220	if (unlikely(nid == 0))
2221		return false;
2222
2223	if (unlikely(f2fs_check_nid_range(sbi, nid)))
2224		return false;
2225
2226	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2227	i->nid = nid;
2228	i->state = FREE_NID;
2229
2230	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2231
2232	spin_lock(&nm_i->nid_list_lock);
2233
2234	if (build) {
2235		/*
2236		 *   Thread A             Thread B
2237		 *  - f2fs_create
2238		 *   - f2fs_new_inode
2239		 *    - f2fs_alloc_nid
2240		 *     - __insert_nid_to_list(PREALLOC_NID)
2241		 *                     - f2fs_balance_fs_bg
2242		 *                      - f2fs_build_free_nids
2243		 *                       - __f2fs_build_free_nids
2244		 *                        - scan_nat_page
2245		 *                         - add_free_nid
2246		 *                          - __lookup_nat_cache
2247		 *  - f2fs_add_link
2248		 *   - f2fs_init_inode_metadata
2249		 *    - f2fs_new_inode_page
2250		 *     - f2fs_new_node_page
2251		 *      - set_node_addr
2252		 *  - f2fs_alloc_nid_done
2253		 *   - __remove_nid_from_list(PREALLOC_NID)
2254		 *                         - __insert_nid_to_list(FREE_NID)
2255		 */
2256		ne = __lookup_nat_cache(nm_i, nid);
2257		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2258				nat_get_blkaddr(ne) != NULL_ADDR))
2259			goto err_out;
2260
2261		e = __lookup_free_nid_list(nm_i, nid);
2262		if (e) {
2263			if (e->state == FREE_NID)
2264				ret = true;
2265			goto err_out;
2266		}
2267	}
2268	ret = true;
2269	err = __insert_free_nid(sbi, i);
2270err_out:
2271	if (update) {
2272		update_free_nid_bitmap(sbi, nid, ret, build);
2273		if (!build)
2274			nm_i->available_nids++;
2275	}
2276	spin_unlock(&nm_i->nid_list_lock);
2277	radix_tree_preload_end();
2278
2279	if (err)
2280		kmem_cache_free(free_nid_slab, i);
2281	return ret;
2282}
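
/*
 * Note on the pattern above: radix_tree_preload(GFP_NOFS | __GFP_NOFAIL)
 * reserves radix-tree nodes before nid_list_lock is taken, so the
 * radix_tree_insert() in __insert_free_nid() cannot fail with -ENOMEM
 * while the spinlock is held; only a duplicate nid can make it fail.
 */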
2283
2284static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2285{
2286	struct f2fs_nm_info *nm_i = NM_I(sbi);
2287	struct free_nid *i;
2288	bool need_free = false;
2289
2290	spin_lock(&nm_i->nid_list_lock);
2291	i = __lookup_free_nid_list(nm_i, nid);
2292	if (i && i->state == FREE_NID) {
2293		__remove_free_nid(sbi, i, FREE_NID);
2294		need_free = true;
2295	}
2296	spin_unlock(&nm_i->nid_list_lock);
2297
2298	if (need_free)
2299		kmem_cache_free(free_nid_slab, i);
2300}
2301
2302static int scan_nat_page(struct f2fs_sb_info *sbi,
2303			struct page *nat_page, nid_t start_nid)
2304{
2305	struct f2fs_nm_info *nm_i = NM_I(sbi);
2306	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2307	block_t blk_addr;
2308	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2309	int i;
2310
2311	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2312
2313	i = start_nid % NAT_ENTRY_PER_BLOCK;
2314
2315	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2316		if (unlikely(start_nid >= nm_i->max_nid))
2317			break;
2318
2319		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2320
2321		if (blk_addr == NEW_ADDR)
2322			return -EINVAL;
2323
2324		if (blk_addr == NULL_ADDR) {
2325			add_free_nid(sbi, start_nid, true, true);
2326		} else {
2327			spin_lock(&NM_I(sbi)->nid_list_lock);
2328			update_free_nid_bitmap(sbi, start_nid, false, true);
2329			spin_unlock(&NM_I(sbi)->nid_list_lock);
2330		}
2331	}
2332
2333	return 0;
2334}
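
/*
 * NEW_ADDR is never valid in an on-disk NAT entry; it only exists
 * transiently in memory for freshly allocated, not-yet-written nodes.
 * Seeing it here therefore makes scan_nat_page() return -EINVAL, which
 * the caller reports as NAT corruption.
 */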
2335
2336static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2337{
2338	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2339	struct f2fs_journal *journal = curseg->journal;
2340	int i;
2341
2342	down_read(&curseg->journal_rwsem);
2343	for (i = 0; i < nats_in_cursum(journal); i++) {
2344		block_t addr;
2345		nid_t nid;
2346
2347		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2348		nid = le32_to_cpu(nid_in_journal(journal, i));
2349		if (addr == NULL_ADDR)
2350			add_free_nid(sbi, nid, true, false);
2351		else
2352			remove_free_nid(sbi, nid);
2353	}
2354	up_read(&curseg->journal_rwsem);
2355}
2356
2357static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2358{
2359	struct f2fs_nm_info *nm_i = NM_I(sbi);
2360	unsigned int i, idx;
2361	nid_t nid;
2362
2363	down_read(&nm_i->nat_tree_lock);
2364
2365	for (i = 0; i < nm_i->nat_blocks; i++) {
2366		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2367			continue;
2368		if (!nm_i->free_nid_count[i])
2369			continue;
2370		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2371			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2372						NAT_ENTRY_PER_BLOCK, idx);
2373			if (idx >= NAT_ENTRY_PER_BLOCK)
2374				break;
2375
2376			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2377			add_free_nid(sbi, nid, true, false);
2378
2379			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2380				goto out;
2381		}
2382	}
2383out:
2384	scan_curseg_cache(sbi);
2385
2386	up_read(&nm_i->nat_tree_lock);
2387}
2388
2389static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2390						bool sync, bool mount)
2391{
2392	struct f2fs_nm_info *nm_i = NM_I(sbi);
2393	int i = 0, ret;
2394	nid_t nid = nm_i->next_scan_nid;
2395
2396	if (unlikely(nid >= nm_i->max_nid))
2397		nid = 0;
2398
2399	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2400		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2401
2402	/* Enough entries */
2403	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2404		return 0;
2405
2406	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2407		return 0;
2408
2409	if (!mount) {
2410		/* try to find free nids in free_nid_bitmap */
2411		scan_free_nid_bits(sbi);
2412
2413		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2414			return 0;
2415	}
2416
2417	/* readahead nat pages to be scanned */
2418	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2419							META_NAT, true);
2420
2421	down_read(&nm_i->nat_tree_lock);
2422
2423	while (1) {
2424		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2425						nm_i->nat_block_bitmap)) {
2426			struct page *page = get_current_nat_page(sbi, nid);
2427
2428			if (IS_ERR(page)) {
2429				ret = PTR_ERR(page);
2430			} else {
2431				ret = scan_nat_page(sbi, page, nid);
2432				f2fs_put_page(page, 1);
2433			}
2434
2435			if (ret) {
2436				up_read(&nm_i->nat_tree_lock);
2437				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2438				return ret;
2439			}
2440		}
2441
2442		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2443		if (unlikely(nid >= nm_i->max_nid))
2444			nid = 0;
2445
2446		if (++i >= FREE_NID_PAGES)
2447			break;
2448	}
2449
 2450	/* record where to resume so the next scan keeps finding free nids */
2451	nm_i->next_scan_nid = nid;
2452
 2453	/* find free nids from the current summary pages */
2454	scan_curseg_cache(sbi);
2455
2456	up_read(&nm_i->nat_tree_lock);
2457
2458	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2459					nm_i->ra_nid_pages, META_NAT, false);
2460
2461	return 0;
2462}
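
/*
 * Scan budget of the function above: each call reads ahead FREE_NID_PAGES
 * NAT blocks, scans at most FREE_NID_PAGES of them plus the journal (via
 * scan_curseg_cache()), saves its position in next_scan_nid, and finally
 * issues ra_nid_pages of readahead so the next call starts on warm pages.
 */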
2463
2464int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2465{
2466	int ret;
2467
2468	mutex_lock(&NM_I(sbi)->build_lock);
2469	ret = __f2fs_build_free_nids(sbi, sync, mount);
2470	mutex_unlock(&NM_I(sbi)->build_lock);
2471
2472	return ret;
2473}
2474
2475/*
 2476 * If this function returns success, the caller can obtain a new nid
 2477 * from the second parameter of this function.
 2478 * The returned nid can be used as the ino as well as the nid when an inode is created.
2479 */
2480bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2481{
2482	struct f2fs_nm_info *nm_i = NM_I(sbi);
2483	struct free_nid *i = NULL;
2484retry:
2485	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2486		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
2487		return false;
2488	}
2489
2490	spin_lock(&nm_i->nid_list_lock);
2491
2492	if (unlikely(nm_i->available_nids == 0)) {
2493		spin_unlock(&nm_i->nid_list_lock);
2494		return false;
2495	}
2496
2497	/* We should not use stale free nids created by f2fs_build_free_nids */
2498	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2499		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2500		i = list_first_entry(&nm_i->free_nid_list,
2501					struct free_nid, list);
2502		*nid = i->nid;
2503
2504		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2505		nm_i->available_nids--;
2506
2507		update_free_nid_bitmap(sbi, *nid, false, false);
2508
2509		spin_unlock(&nm_i->nid_list_lock);
2510		return true;
2511	}
2512	spin_unlock(&nm_i->nid_list_lock);
2513
 2514	/* Let's scan nat pages and their caches to get free nids */
2515	if (!f2fs_build_free_nids(sbi, true, false))
2516		goto retry;
2517	return false;
2518}
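
/*
 * Typical caller pattern, as a minimal sketch (error handling abbreviated;
 * f2fs_new_inode() is the canonical in-tree user):
 *
 *	nid_t ino;
 *
 *	if (!f2fs_alloc_nid(sbi, &ino))
 *		return ERR_PTR(-ENOSPC);
 *	... initialize the new inode using ino ...
 *	then either f2fs_alloc_nid_done(sbi, ino) on success, or
 *	f2fs_alloc_nid_failed(sbi, ino) on failure.
 */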
2519
2520/*
2521 * f2fs_alloc_nid() should be called prior to this function.
2522 */
2523void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2524{
2525	struct f2fs_nm_info *nm_i = NM_I(sbi);
2526	struct free_nid *i;
2527
2528	spin_lock(&nm_i->nid_list_lock);
2529	i = __lookup_free_nid_list(nm_i, nid);
2530	f2fs_bug_on(sbi, !i);
2531	__remove_free_nid(sbi, i, PREALLOC_NID);
2532	spin_unlock(&nm_i->nid_list_lock);
2533
2534	kmem_cache_free(free_nid_slab, i);
2535}
2536
2537/*
2538 * f2fs_alloc_nid() should be called prior to this function.
2539 */
2540void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2541{
2542	struct f2fs_nm_info *nm_i = NM_I(sbi);
2543	struct free_nid *i;
2544	bool need_free = false;
2545
2546	if (!nid)
2547		return;
2548
2549	spin_lock(&nm_i->nid_list_lock);
2550	i = __lookup_free_nid_list(nm_i, nid);
2551	f2fs_bug_on(sbi, !i);
2552
2553	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2554		__remove_free_nid(sbi, i, PREALLOC_NID);
2555		need_free = true;
2556	} else {
2557		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2558	}
2559
2560	nm_i->available_nids++;
2561
2562	update_free_nid_bitmap(sbi, nid, true, false);
2563
2564	spin_unlock(&nm_i->nid_list_lock);
2565
2566	if (need_free)
2567		kmem_cache_free(free_nid_slab, i);
2568}
2569
2570int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2571{
2572	struct f2fs_nm_info *nm_i = NM_I(sbi);
2573	int nr = nr_shrink;
2574
2575	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2576		return 0;
2577
2578	if (!mutex_trylock(&nm_i->build_lock))
2579		return 0;
2580
2581	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2582		struct free_nid *i, *next;
2583		unsigned int batch = SHRINK_NID_BATCH_SIZE;
2584
2585		spin_lock(&nm_i->nid_list_lock);
2586		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2587			if (!nr_shrink || !batch ||
2588				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2589				break;
2590			__remove_free_nid(sbi, i, FREE_NID);
2591			kmem_cache_free(free_nid_slab, i);
2592			nr_shrink--;
2593			batch--;
2594		}
2595		spin_unlock(&nm_i->nid_list_lock);
2596	}
2597
2598	mutex_unlock(&nm_i->build_lock);
2599
2600	return nr - nr_shrink;
2601}
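
/*
 * Shrinker notes for the function above: mutex_trylock() keeps the caller
 * from blocking behind a concurrent f2fs_build_free_nids() run, and
 * SHRINK_NID_BATCH_SIZE bounds how long nid_list_lock is held in one pass.
 * The return value, nr - nr_shrink, is the number of nids actually freed.
 */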
2602
2603int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2604{
2605	void *src_addr, *dst_addr;
2606	size_t inline_size;
2607	struct page *ipage;
2608	struct f2fs_inode *ri;
2609
2610	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2611	if (IS_ERR(ipage))
2612		return PTR_ERR(ipage);
2613
2614	ri = F2FS_INODE(page);
2615	if (ri->i_inline & F2FS_INLINE_XATTR) {
2616		if (!f2fs_has_inline_xattr(inode)) {
2617			set_inode_flag(inode, FI_INLINE_XATTR);
2618			stat_inc_inline_xattr(inode);
2619		}
2620	} else {
2621		if (f2fs_has_inline_xattr(inode)) {
2622			stat_dec_inline_xattr(inode);
2623			clear_inode_flag(inode, FI_INLINE_XATTR);
2624		}
2625		goto update_inode;
2626	}
2627
2628	dst_addr = inline_xattr_addr(inode, ipage);
2629	src_addr = inline_xattr_addr(inode, page);
2630	inline_size = inline_xattr_size(inode);
2631
2632	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2633	memcpy(dst_addr, src_addr, inline_size);
2634update_inode:
2635	f2fs_update_inode(inode, ipage);
2636	f2fs_put_page(ipage, 1);
2637	return 0;
2638}
2639
2640int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2641{
2642	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2643	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2644	nid_t new_xnid;
2645	struct dnode_of_data dn;
2646	struct node_info ni;
2647	struct page *xpage;
2648	int err;
2649
2650	if (!prev_xnid)
2651		goto recover_xnid;
2652
2653	/* 1: invalidate the previous xattr nid */
2654	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
2655	if (err)
2656		return err;
2657
2658	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2659	dec_valid_node_count(sbi, inode, false);
2660	set_node_addr(sbi, &ni, NULL_ADDR, false);
2661
2662recover_xnid:
2663	/* 2: update xattr nid in inode */
2664	if (!f2fs_alloc_nid(sbi, &new_xnid))
2665		return -ENOSPC;
2666
2667	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2668	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2669	if (IS_ERR(xpage)) {
2670		f2fs_alloc_nid_failed(sbi, new_xnid);
2671		return PTR_ERR(xpage);
2672	}
2673
2674	f2fs_alloc_nid_done(sbi, new_xnid);
2675	f2fs_update_inode_page(inode);
2676
2677	/* 3: update and set xattr node page dirty */
2678	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2679
2680	set_page_dirty(xpage);
2681	f2fs_put_page(xpage, 1);
2682
2683	return 0;
2684}
2685
2686int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2687{
2688	struct f2fs_inode *src, *dst;
2689	nid_t ino = ino_of_node(page);
2690	struct node_info old_ni, new_ni;
2691	struct page *ipage;
2692	int err;
2693
2694	err = f2fs_get_node_info(sbi, ino, &old_ni);
2695	if (err)
2696		return err;
2697
2698	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2699		return -EINVAL;
2700retry:
2701	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2702	if (!ipage) {
2703		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2704		goto retry;
2705	}
2706
 2707	/* make sure this ino is not reused from the free nid list */
2708	remove_free_nid(sbi, ino);
2709
2710	if (!PageUptodate(ipage))
2711		SetPageUptodate(ipage);
2712	fill_node_footer(ipage, ino, ino, 0, true);
2713	set_cold_node(ipage, false);
2714
2715	src = F2FS_INODE(page);
2716	dst = F2FS_INODE(ipage);
2717
2718	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2719	dst->i_size = 0;
2720	dst->i_blocks = cpu_to_le64(1);
2721	dst->i_links = cpu_to_le32(1);
2722	dst->i_xattr_nid = 0;
2723	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2724	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2725		dst->i_extra_isize = src->i_extra_isize;
2726
2727		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2728			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2729							i_inline_xattr_size))
2730			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2731
2732		if (f2fs_sb_has_project_quota(sbi) &&
2733			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2734								i_projid))
2735			dst->i_projid = src->i_projid;
2736
2737		if (f2fs_sb_has_inode_crtime(sbi) &&
2738			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2739							i_crtime_nsec)) {
2740			dst->i_crtime = src->i_crtime;
2741			dst->i_crtime_nsec = src->i_crtime_nsec;
2742		}
2743	}
2744
2745	new_ni = old_ni;
2746	new_ni.ino = ino;
2747
2748	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2749		WARN_ON(1);
2750	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2751	inc_valid_inode_count(sbi);
2752	set_page_dirty(ipage);
2753	f2fs_put_page(ipage, 1);
2754	return 0;
2755}
2756
2757int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2758			unsigned int segno, struct f2fs_summary_block *sum)
2759{
2760	struct f2fs_node *rn;
2761	struct f2fs_summary *sum_entry;
2762	block_t addr;
2763	int i, idx, last_offset, nrpages;
2764
2765	/* scan the node segment */
2766	last_offset = sbi->blocks_per_seg;
2767	addr = START_BLOCK(sbi, segno);
2768	sum_entry = &sum->entries[0];
2769
2770	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2771		nrpages = bio_max_segs(last_offset - i);
2772
2773		/* readahead node pages */
2774		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2775
2776		for (idx = addr; idx < addr + nrpages; idx++) {
2777			struct page *page = f2fs_get_tmp_page(sbi, idx);
2778
2779			if (IS_ERR(page))
2780				return PTR_ERR(page);
2781
2782			rn = F2FS_NODE(page);
2783			sum_entry->nid = rn->footer.nid;
2784			sum_entry->version = 0;
2785			sum_entry->ofs_in_node = 0;
2786			sum_entry++;
2787			f2fs_put_page(page, 1);
2788		}
2789
2790		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2791							addr + nrpages);
2792	}
2793	return 0;
2794}
2795
2796static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2797{
2798	struct f2fs_nm_info *nm_i = NM_I(sbi);
2799	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2800	struct f2fs_journal *journal = curseg->journal;
2801	int i;
2802
2803	down_write(&curseg->journal_rwsem);
2804	for (i = 0; i < nats_in_cursum(journal); i++) {
2805		struct nat_entry *ne;
2806		struct f2fs_nat_entry raw_ne;
2807		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2808
2809		if (f2fs_check_nid_range(sbi, nid))
2810			continue;
2811
2812		raw_ne = nat_in_journal(journal, i);
2813
2814		ne = __lookup_nat_cache(nm_i, nid);
2815		if (!ne) {
2816			ne = __alloc_nat_entry(nid, true);
2817			__init_nat_entry(nm_i, ne, &raw_ne, true);
2818		}
2819
2820		/*
 2821		 * if a free nat in the journal has not been used since the
 2822		 * last checkpoint, remove it from the available nids, since
 2823		 * we will add it again later.
2824		 */
2825		if (!get_nat_flag(ne, IS_DIRTY) &&
2826				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2827			spin_lock(&nm_i->nid_list_lock);
2828			nm_i->available_nids--;
2829			spin_unlock(&nm_i->nid_list_lock);
2830		}
2831
2832		__set_nat_cache_dirty(nm_i, ne);
2833	}
2834	update_nats_in_cursum(journal, -i);
2835	up_write(&curseg->journal_rwsem);
2836}
2837
2838static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2839						struct list_head *head, int max)
2840{
2841	struct nat_entry_set *cur;
2842
2843	if (nes->entry_cnt >= max)
2844		goto add_out;
2845
2846	list_for_each_entry(cur, head, set_list) {
2847		if (cur->entry_cnt >= nes->entry_cnt) {
2848			list_add(&nes->set_list, cur->set_list.prev);
2849			return;
2850		}
2851	}
2852add_out:
2853	list_add_tail(&nes->set_list, head);
2854}
2855
2856static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2857						struct page *page)
2858{
2859	struct f2fs_nm_info *nm_i = NM_I(sbi);
2860	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2861	struct f2fs_nat_block *nat_blk = page_address(page);
2862	int valid = 0;
2863	int i = 0;
2864
2865	if (!enabled_nat_bits(sbi, NULL))
2866		return;
2867
2868	if (nat_index == 0) {
2869		valid = 1;
2870		i = 1;
2871	}
2872	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2873		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2874			valid++;
2875	}
2876	if (valid == 0) {
2877		__set_bit_le(nat_index, nm_i->empty_nat_bits);
2878		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2879		return;
2880	}
2881
2882	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
2883	if (valid == NAT_ENTRY_PER_BLOCK)
2884		__set_bit_le(nat_index, nm_i->full_nat_bits);
2885	else
2886		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2887}
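
/*
 * Semantics of the two bitmaps maintained above: a set bit in
 * empty_nat_bits means every entry of that NAT block is NULL_ADDR, and a
 * set bit in full_nat_bits means every entry is in use; partially used
 * blocks keep both bits clear. NAT block 0 starts with valid = 1 because
 * nid 0 is reserved and always counts as in use. load_free_nid_bitmap()
 * later uses these bits to avoid reading such blocks at mount time.
 */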
2888
2889static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2890		struct nat_entry_set *set, struct cp_control *cpc)
2891{
2892	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2893	struct f2fs_journal *journal = curseg->journal;
2894	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2895	bool to_journal = true;
2896	struct f2fs_nat_block *nat_blk;
2897	struct nat_entry *ne, *cur;
2898	struct page *page = NULL;
2899
2900	/*
2901	 * there are two steps to flush nat entries:
 2902	 * #1, flush nat entries to the journal in the current hot data summary block.
 2903	 * #2, flush nat entries to the nat page.
2904	 */
2905	if (enabled_nat_bits(sbi, cpc) ||
2906		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2907		to_journal = false;
2908
2909	if (to_journal) {
2910		down_write(&curseg->journal_rwsem);
2911	} else {
2912		page = get_next_nat_page(sbi, start_nid);
2913		if (IS_ERR(page))
2914			return PTR_ERR(page);
2915
2916		nat_blk = page_address(page);
2917		f2fs_bug_on(sbi, !nat_blk);
2918	}
2919
2920	/* flush dirty nats in nat entry set */
2921	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2922		struct f2fs_nat_entry *raw_ne;
2923		nid_t nid = nat_get_nid(ne);
2924		int offset;
2925
2926		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2927
2928		if (to_journal) {
2929			offset = f2fs_lookup_journal_in_cursum(journal,
2930							NAT_JOURNAL, nid, 1);
2931			f2fs_bug_on(sbi, offset < 0);
2932			raw_ne = &nat_in_journal(journal, offset);
2933			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2934		} else {
2935			raw_ne = &nat_blk->entries[nid - start_nid];
2936		}
2937		raw_nat_from_node_info(raw_ne, &ne->ni);
2938		nat_reset_flag(ne);
2939		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
2940		if (nat_get_blkaddr(ne) == NULL_ADDR) {
2941			add_free_nid(sbi, nid, false, true);
2942		} else {
2943			spin_lock(&NM_I(sbi)->nid_list_lock);
2944			update_free_nid_bitmap(sbi, nid, false, false);
2945			spin_unlock(&NM_I(sbi)->nid_list_lock);
2946		}
2947	}
2948
2949	if (to_journal) {
2950		up_write(&curseg->journal_rwsem);
2951	} else {
2952		__update_nat_bits(sbi, start_nid, page);
2953		f2fs_put_page(page, 1);
2954	}
2955
 2956	/* Allow dirty nats to be created by node block allocation in write_begin */
2957	if (!set->entry_cnt) {
2958		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2959		kmem_cache_free(nat_entry_set_slab, set);
2960	}
2961	return 0;
2962}
2963
2964/*
2965 * This function is called during the checkpointing process.
2966 */
2967int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2968{
2969	struct f2fs_nm_info *nm_i = NM_I(sbi);
2970	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2971	struct f2fs_journal *journal = curseg->journal;
2972	struct nat_entry_set *setvec[SETVEC_SIZE];
2973	struct nat_entry_set *set, *tmp;
2974	unsigned int found;
2975	nid_t set_idx = 0;
2976	LIST_HEAD(sets);
2977	int err = 0;
2978
2979	/*
2980	 * during unmount, let's flush nat_bits before checking
2981	 * nat_cnt[DIRTY_NAT].
2982	 */
2983	if (enabled_nat_bits(sbi, cpc)) {
2984		down_write(&nm_i->nat_tree_lock);
2985		remove_nats_in_journal(sbi);
2986		up_write(&nm_i->nat_tree_lock);
2987	}
2988
2989	if (!nm_i->nat_cnt[DIRTY_NAT])
2990		return 0;
2991
2992	down_write(&nm_i->nat_tree_lock);
2993
2994	/*
 2995	 * if there is not enough space in the journal to store dirty nat
 2996	 * entries, remove all entries from the journal and merge them
 2997	 * into the nat entry set.
2998	 */
2999	if (enabled_nat_bits(sbi, cpc) ||
3000		!__has_cursum_space(journal,
3001			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3002		remove_nats_in_journal(sbi);
3003
3004	while ((found = __gang_lookup_nat_set(nm_i,
3005					set_idx, SETVEC_SIZE, setvec))) {
3006		unsigned idx;
3007
3008		set_idx = setvec[found - 1]->set + 1;
3009		for (idx = 0; idx < found; idx++)
3010			__adjust_nat_entry_set(setvec[idx], &sets,
3011						MAX_NAT_JENTRIES(journal));
3012	}
3013
3014	/* flush dirty nats in nat entry set */
3015	list_for_each_entry_safe(set, tmp, &sets, set_list) {
3016		err = __flush_nat_entry_set(sbi, set, cpc);
3017		if (err)
3018			break;
3019	}
3020
3021	up_write(&nm_i->nat_tree_lock);
 3022	/* Allow dirty nats to be created by node block allocation in write_begin */
3023
3024	return err;
3025}
3026
3027static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3028{
3029	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3030	struct f2fs_nm_info *nm_i = NM_I(sbi);
3031	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3032	unsigned int i;
3033	__u64 cp_ver = cur_cp_version(ckpt);
3034	block_t nat_bits_addr;
3035
3036	if (!enabled_nat_bits(sbi, NULL))
3037		return 0;
3038
3039	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3040	nm_i->nat_bits = f2fs_kvzalloc(sbi,
3041			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3042	if (!nm_i->nat_bits)
3043		return -ENOMEM;
3044
3045	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
3046						nm_i->nat_bits_blocks;
3047	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3048		struct page *page;
3049
3050		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3051		if (IS_ERR(page))
3052			return PTR_ERR(page);
3053
3054		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3055					page_address(page), F2FS_BLKSIZE);
3056		f2fs_put_page(page, 1);
3057	}
3058
3059	cp_ver |= (cur_cp_crc(ckpt) << 32);
3060	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3061		disable_nat_bits(sbi, true);
3062		return 0;
3063	}
3064
3065	nm_i->full_nat_bits = nm_i->nat_bits + 8;
3066	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3067
3068	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3069	return 0;
3070}
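
/*
 * Layout of the nat_bits area read above (a sketch): the last
 * nat_bits_blocks blocks of the checkpoint pack hold
 *
 *	[ 8-byte cp_ver | crc ] [ full_nat_bits ] [ empty_nat_bits ]
 *
 * with each bitmap nat_blocks / BITS_PER_BYTE bytes long. A version
 * mismatch means the bits belong to a different checkpoint, so they are
 * disabled rather than trusted.
 */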
3071
3072static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3073{
3074	struct f2fs_nm_info *nm_i = NM_I(sbi);
3075	unsigned int i = 0;
3076	nid_t nid, last_nid;
3077
3078	if (!enabled_nat_bits(sbi, NULL))
3079		return;
3080
3081	for (i = 0; i < nm_i->nat_blocks; i++) {
3082		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3083		if (i >= nm_i->nat_blocks)
3084			break;
3085
3086		__set_bit_le(i, nm_i->nat_block_bitmap);
3087
3088		nid = i * NAT_ENTRY_PER_BLOCK;
3089		last_nid = nid + NAT_ENTRY_PER_BLOCK;
3090
3091		spin_lock(&NM_I(sbi)->nid_list_lock);
3092		for (; nid < last_nid; nid++)
3093			update_free_nid_bitmap(sbi, nid, true, true);
3094		spin_unlock(&NM_I(sbi)->nid_list_lock);
3095	}
3096
3097	for (i = 0; i < nm_i->nat_blocks; i++) {
3098		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3099		if (i >= nm_i->nat_blocks)
3100			break;
3101
3102		__set_bit_le(i, nm_i->nat_block_bitmap);
3103	}
3104}
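
/*
 * Mount-time effect of the two loops above: an empty NAT block needs no
 * disk read, so every nid in it is marked free in free_nid_bitmap right
 * away; a full NAT block is only marked scanned in nat_block_bitmap so
 * later free-nid scans skip it entirely.
 */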
3105
3106static int init_node_manager(struct f2fs_sb_info *sbi)
3107{
3108	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3109	struct f2fs_nm_info *nm_i = NM_I(sbi);
3110	unsigned char *version_bitmap;
3111	unsigned int nat_segs;
3112	int err;
3113
3114	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3115
 3116	/* segment_count_nat includes pair segments, so divide by 2. */
3117	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3118	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3119	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3120
 3121	/* unused nids: 0, node, meta (root is counted as a valid node) */
3122	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3123						F2FS_RESERVED_NODE_NUM;
3124	nm_i->nid_cnt[FREE_NID] = 0;
3125	nm_i->nid_cnt[PREALLOC_NID] = 0;
3126	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3127	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3128	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3129
3130	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3131	INIT_LIST_HEAD(&nm_i->free_nid_list);
3132	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3133	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3134	INIT_LIST_HEAD(&nm_i->nat_entries);
3135	spin_lock_init(&nm_i->nat_list_lock);
3136
3137	mutex_init(&nm_i->build_lock);
3138	spin_lock_init(&nm_i->nid_list_lock);
3139	init_rwsem(&nm_i->nat_tree_lock);
3140
3141	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3142	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3143	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3144	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3145					GFP_KERNEL);
3146	if (!nm_i->nat_bitmap)
3147		return -ENOMEM;
3148
3149	err = __get_nat_bitmaps(sbi);
3150	if (err)
3151		return err;
3152
3153#ifdef CONFIG_F2FS_CHECK_FS
3154	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3155					GFP_KERNEL);
3156	if (!nm_i->nat_bitmap_mir)
3157		return -ENOMEM;
3158#endif
3159
3160	return 0;
3161}
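
/*
 * Sizing example for the computation above (assuming the defaults of 4KB
 * blocks, 512 blocks per segment and NAT_ENTRY_PER_BLOCK == 455): an image
 * formatted with segment_count_nat == 20 yields nat_segs = 10,
 * nat_blocks = 10 * 512 = 5120 and max_nid = 455 * 5120 = 2329600.
 */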
3162
3163static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3164{
3165	struct f2fs_nm_info *nm_i = NM_I(sbi);
3166	int i;
3167
3168	nm_i->free_nid_bitmap =
3169		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3170					      nm_i->nat_blocks),
3171			      GFP_KERNEL);
3172	if (!nm_i->free_nid_bitmap)
3173		return -ENOMEM;
3174
3175	for (i = 0; i < nm_i->nat_blocks; i++) {
3176		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3177			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3178		if (!nm_i->free_nid_bitmap[i])
3179			return -ENOMEM;
3180	}
3181
3182	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3183								GFP_KERNEL);
3184	if (!nm_i->nat_block_bitmap)
3185		return -ENOMEM;
3186
3187	nm_i->free_nid_count =
3188		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3189					      nm_i->nat_blocks),
3190			      GFP_KERNEL);
3191	if (!nm_i->free_nid_count)
3192		return -ENOMEM;
3193	return 0;
3194}
3195
3196int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3197{
3198	int err;
3199
3200	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3201							GFP_KERNEL);
3202	if (!sbi->nm_info)
3203		return -ENOMEM;
3204
3205	err = init_node_manager(sbi);
3206	if (err)
3207		return err;
3208
3209	err = init_free_nid_cache(sbi);
3210	if (err)
3211		return err;
3212
3213	/* load free nid status from nat_bits table */
3214	load_free_nid_bitmap(sbi);
3215
3216	return f2fs_build_free_nids(sbi, true, true);
3217}
3218
3219void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3220{
3221	struct f2fs_nm_info *nm_i = NM_I(sbi);
3222	struct free_nid *i, *next_i;
3223	struct nat_entry *natvec[NATVEC_SIZE];
3224	struct nat_entry_set *setvec[SETVEC_SIZE];
3225	nid_t nid = 0;
3226	unsigned int found;
3227
3228	if (!nm_i)
3229		return;
3230
3231	/* destroy free nid list */
3232	spin_lock(&nm_i->nid_list_lock);
3233	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3234		__remove_free_nid(sbi, i, FREE_NID);
3235		spin_unlock(&nm_i->nid_list_lock);
3236		kmem_cache_free(free_nid_slab, i);
3237		spin_lock(&nm_i->nid_list_lock);
3238	}
3239	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3240	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3241	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3242	spin_unlock(&nm_i->nid_list_lock);
3243
3244	/* destroy nat cache */
3245	down_write(&nm_i->nat_tree_lock);
3246	while ((found = __gang_lookup_nat_cache(nm_i,
3247					nid, NATVEC_SIZE, natvec))) {
3248		unsigned idx;
3249
3250		nid = nat_get_nid(natvec[found - 1]) + 1;
3251		for (idx = 0; idx < found; idx++) {
3252			spin_lock(&nm_i->nat_list_lock);
3253			list_del(&natvec[idx]->list);
3254			spin_unlock(&nm_i->nat_list_lock);
3255
3256			__del_from_nat_cache(nm_i, natvec[idx]);
3257		}
3258	}
3259	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3260
3261	/* destroy nat set cache */
3262	nid = 0;
3263	while ((found = __gang_lookup_nat_set(nm_i,
3264					nid, SETVEC_SIZE, setvec))) {
3265		unsigned idx;
3266
3267		nid = setvec[found - 1]->set + 1;
3268		for (idx = 0; idx < found; idx++) {
 3269			/* entry_cnt can be non-zero when a cp_error has occurred */
3270			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3271			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3272			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3273		}
3274	}
3275	up_write(&nm_i->nat_tree_lock);
3276
3277	kvfree(nm_i->nat_block_bitmap);
3278	if (nm_i->free_nid_bitmap) {
3279		int i;
3280
3281		for (i = 0; i < nm_i->nat_blocks; i++)
3282			kvfree(nm_i->free_nid_bitmap[i]);
3283		kvfree(nm_i->free_nid_bitmap);
3284	}
3285	kvfree(nm_i->free_nid_count);
3286
3287	kvfree(nm_i->nat_bitmap);
3288	kvfree(nm_i->nat_bits);
3289#ifdef CONFIG_F2FS_CHECK_FS
3290	kvfree(nm_i->nat_bitmap_mir);
3291#endif
3292	sbi->nm_info = NULL;
3293	kfree(nm_i);
3294}
3295
3296int __init f2fs_create_node_manager_caches(void)
3297{
3298	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3299			sizeof(struct nat_entry));
3300	if (!nat_entry_slab)
3301		goto fail;
3302
3303	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3304			sizeof(struct free_nid));
3305	if (!free_nid_slab)
3306		goto destroy_nat_entry;
3307
3308	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3309			sizeof(struct nat_entry_set));
3310	if (!nat_entry_set_slab)
3311		goto destroy_free_nid;
3312
3313	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3314			sizeof(struct fsync_node_entry));
3315	if (!fsync_node_entry_slab)
3316		goto destroy_nat_entry_set;
3317	return 0;
3318
3319destroy_nat_entry_set:
3320	kmem_cache_destroy(nat_entry_set_slab);
3321destroy_free_nid:
3322	kmem_cache_destroy(free_nid_slab);
3323destroy_nat_entry:
3324	kmem_cache_destroy(nat_entry_slab);
3325fail:
3326	return -ENOMEM;
3327}
3328
3329void f2fs_destroy_node_manager_caches(void)
3330{
3331	kmem_cache_destroy(fsync_node_entry_slab);
3332	kmem_cache_destroy(nat_entry_set_slab);
3333	kmem_cache_destroy(free_nid_slab);
3334	kmem_cache_destroy(nat_entry_slab);
3335}