// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * block_age-based extent cache added by:
 * Copyright (c) 2022 xiaomi Co., Ltd.
 *             http://www.xiaomi.com/
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

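/*
 * Two independent extent caches are kept per inode:
 *
 *   EX_READ      - caches logical-to-physical block mappings, so read
 *                  paths can resolve block addresses without walking
 *                  node pages.
 *   EX_BLOCK_AGE - caches a per-range "age" estimate derived from the
 *                  global allocated_data_blocks counter, used for data
 *                  temperature decisions.
 *
 * Locking, as used throughout this file:
 *   - et->lock (rwlock) protects a single inode's extent tree.
 *   - eti->extent_lock (spinlock) protects the global LRU extent_list.
 *   - eti->extent_tree_lock (mutex) protects the radix tree of all
 *     extent trees and the zombie_list.
 */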
bool sanity_check_extent_cache(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_info ei;
	int devi;

	get_read_extent_info(&ei, i_ext);

	if (!ei.len)
		return true;

	if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) ||
	    !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1,
					DATA_GENERIC_ENHANCE)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
			  __func__, inode->i_ino,
			  ei.blk, ei.fofs, ei.len);
		return false;
	}

	if (!IS_DEVICE_ALIASING(inode))
		return true;

	for (devi = 0; devi < sbi->s_ndevs; devi++) {
		if (FDEV(devi).start_blk != ei.blk ||
				FDEV(devi).end_blk != ei.blk + ei.len - 1)
			continue;

		if (devi == 0) {
			f2fs_warn(sbi,
			    "%s: inode (ino=%lx) is an alias of meta device",
			    __func__, inode->i_ino);
			return false;
		}

		if (bdev_is_zoned(FDEV(devi).bdev)) {
			f2fs_warn(sbi,
			    "%s: device alias inode (ino=%lx)'s extent info "
			    "[%u, %u, %u] maps to zoned block device",
			    __func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
			return false;
		}
		return true;
	}

	f2fs_warn(sbi, "%s: device alias inode (ino=%lx)'s extent info "
			"[%u, %u, %u] is inconsistent w/ any devices",
			__func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
	return false;
}

static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				unsigned long age, unsigned long last_blocks,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	} else if (type == EX_BLOCK_AGE) {
		ei->age = age;
		ei->last_blocks = last_blocks;
	}
}

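/*
 * Static eligibility: EX_READ needs READ_EXTENT_CACHE in the mount
 * options and a regular file; EX_BLOCK_AGE needs AGE_EXTENT_CACHE and
 * a regular file or directory.  __may_extent_tree() below layers
 * runtime checks (compressed or cold files, shrinker registration) on
 * top of these static ones.
 */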
static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (type == EX_READ)
		return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
			S_ISREG(inode->i_mode);
	if (type == EX_BLOCK_AGE)
		return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
	return false;
}

static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (IS_DEVICE_ALIASING(inode) && type == EX_READ)
		return true;

	/*
	 * For files recovered during mount, do not create extents
	 * if the shrinker is not yet registered.
	 */
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	if (!__init_may_extent_tree(inode, type))
		return false;

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT))
			return false;
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
				 !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
			return false;
	} else if (type == EX_BLOCK_AGE) {
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
			return false;
		if (file_is_cold(inode))
			return false;
	}
	return true;
}

static void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}

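/*
 * Two extents are mergeable when they are logically contiguous and,
 * for EX_READ, physically contiguous as well.  A sketch with
 * hypothetical values:
 *
 *	back:  { fofs = 0, len = 4, blk = 100 }    maps blocks 100..103
 *	front: { fofs = 4, len = 2, blk = 104 }    maps blocks 104..105
 *
 * back->fofs + back->len == front->fofs and back->blk + back->len ==
 * front->blk, so the pair collapses into { 0, 6, 100 }.  For
 * EX_BLOCK_AGE, physical contiguity is replaced by requiring both age
 * estimates to lie within SAME_AGE_REGION of each other.
 */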
static bool __is_extent_mergeable(struct extent_info *back,
		struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	} else if (type == EX_BLOCK_AGE) {
		return (back->fofs + back->len == front->fofs &&
			abs(back->age - front->age) <= SAME_AGE_REGION &&
			abs(back->last_blocks - front->last_blocks) <=
							SAME_AGE_REGION);
	}
	return false;
}

static bool __is_back_mergeable(struct extent_info *cur,
		struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}

static bool __is_front_mergeable(struct extent_info *cur,
		struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}

static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
			struct extent_node *cached_en, unsigned int fofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct extent_node *en;

	/* check a cached entry */
	if (cached_en && cached_en->ei.fofs <= fofs &&
			cached_en->ei.fofs + cached_en->ei.len > fofs)
		return cached_en;

	/* check rb_tree */
	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}

/*
 * Look up the rb entry covering @fofs in the rb-tree; if hit, return
 * the entry, otherwise return NULL.
 * @prev_entry: extent before fofs
 * @next_entry: extent after fofs
 * @insert_p: insert point for a new extent at fofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
				struct extent_node *cached_en,
				unsigned int fofs,
				struct extent_node **prev_entry,
				struct extent_node **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
		goto lookup_neighbors;

	*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			pnode = &(*pnode)->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			pnode = &(*pnode)->rb_right;
			*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	return en;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

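/*
 * Allocation uses GFP_ATOMIC because every caller holds the extent
 * tree's write lock (et->lock); failure is tolerated and simply
 * leaves the extent uncached.
 */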
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}

static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, unsigned int nr_shrink)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count;

	node = rb_first_cached(&et->root);

	for (count = 0; node && count < nr_shrink; count++) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count;
}

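/*
 * Invalidate the cached largest extent if it overlaps [fofs, fofs + len):
 * the two comparisons below are the usual half-open interval overlap
 * test, e.g. largest = { fofs = 8, len = 4 } overlaps an update at
 * { fofs = 10, len = 1 } since 10 < 8 + 4 and 10 + 1 > 8.
 */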
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, EX_READ)) {
		/* drop largest read extent */
		if (i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		set_inode_flag(inode, FI_NO_EXTENT);
		return;
	}

	et = __grab_extent_tree(inode, EX_READ);

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt) || !ei.len)
		goto skip;

	if (IS_DEVICE_ALIASING(inode)) {
		et->largest = ei;
		goto skip;
	}

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
skip:
	/* drop the largest extent, if the checkpoint got corrupted */
	if (f2fs_cp_error(sbi)) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
	write_unlock(&et->lock);
}

void f2fs_init_age_extent_tree(struct inode *inode)
{
	if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
		return;
	__grab_extent_tree(inode, EX_BLOCK_AGE);
}

void f2fs_init_extent_tree(struct inode *inode)
{
	/* initialize read cache */
	if (__init_may_extent_tree(inode, EX_READ))
		__grab_extent_tree(inode, EX_READ);

	/* initialize block age cache */
	if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
		__grab_extent_tree(inode, EX_BLOCK_AGE);
}

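/*
 * Lookup order: the cached largest extent (EX_READ only), then the
 * per-tree cached_en shortcut, then a full rb-tree walk; each source
 * bumps its own hit counter (largest/cached/rbtree).  A hit also moves
 * the node to the tail of the global LRU list, so the shrinker
 * reclaims the coldest entries first.
 */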
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			(pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	if (IS_DEVICE_ALIASING(inode)) {
		ret = false;
		goto out;
	}

	en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
	return ret;
}

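/*
 * Try to extend @prev_ex forward and/or @next_ex backward to absorb
 * @ei.  If @ei bridges both neighbors, all three ranges collapse into
 * @next_ex and @prev_ex is released.  Returns the merged node, or
 * NULL if no merge was possible and the caller must insert a fresh
 * node instead.
 */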
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}

static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	/* look up extent_node in the rb tree */
	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}

static unsigned int __destroy_extent_node(struct inode *inode,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int nr_shrink = type == EX_READ ?
				READ_EXTENT_CACHE_SHRINK_NUMBER :
				AGE_EXTENT_CACHE_SHRINK_NUMBER;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	while (atomic_read(&et->node_cnt)) {
		write_lock(&et->lock);
		node_cnt += __free_extent_tree(sbi, et, nr_shrink);
		write_unlock(&et->lock);
	}

	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));

	return node_cnt;
}

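/*
 * Walk-through of the invalidation loop below, with hypothetical
 * values: given an existing EX_READ extent { fofs = 0, len = 256,
 * blk = 1000 }, an update over [100, 120) first truncates the node to
 * { 0, 100, 1000 } (parts = 1), then splits off the tail
 * { 120, 136, 1120 } (parts = 2), and finally merges or inserts the
 * new mapping for [100, 120) itself.  For EX_READ, leftover pieces
 * shorter than F2FS_MIN_EXTENT_LEN are dropped rather than kept, and
 * a pattern of small fragmenting updates sets FI_NO_EXTENT to disable
 * the read cache for the inode altogether.
 */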
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
						tei->age, tei->last_blocks);

	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop the largest extent before lookup, in case it has
		 * already been shrunk from the extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && (type != EX_READ ||
			(org_end - end >= F2FS_MIN_EXTENT_LEN &&
			atomic_read(&et->node_cnt) <
					sbi->max_read_extent_count))) {
			if (parts) {
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					dei.age, dei.last_blocks,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					dei.age, dei.last_blocks,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * If the original extent was split into zero or two parts,
		 * the tree has been altered by deletion or insertion;
		 * therefore invalidate the cached insert position.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	if (type == EX_BLOCK_AGE)
		goto update_age_extent_cache;

	/* 3. update extent in read extent cache */
	BUG_ON(type != EX_READ);

	if (tei->blk) {
		__set_extent_info(&ei, fofs, len, tei->blk, false,
				  0, 0, EX_READ);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}
	goto out_read_extent_cache;
update_age_extent_cache:
	if (!tei->last_blocks)
		goto out_read_extent_cache;

	__set_extent_info(&ei, fofs, len, 0, false,
			tei->age, tei->last_blocks, EX_BLOCK_AGE);
	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
out_read_extent_cache:
	write_unlock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__destroy_extent_node(inode, EX_READ);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
						blkaddr, c_len);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (en)
		goto unlock_out;

	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
				insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif

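/*
 * Weighted moving average that keeps last_age_weight percent of the
 * old age, i.e. roughly:
 *
 *	res = new * (100 - weight) / 100 + old * weight / 100
 *
 * computed from div_u64_rem() quotients plus scaled remainders so the
 * multiplications cannot overflow 64 bits.
 */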
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
						unsigned long long new,
						unsigned long long old)
{
	unsigned int rem_old, rem_new;
	unsigned long long res;
	unsigned int weight = sbi->last_age_weight;

	res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
		+ div_u64_rem(old, 100, &rem_old) * weight;

	if (rem_new)
		res += rem_new * (100 - weight) / 100;
	if (rem_old)
		res += rem_old * weight / 100;

	return res;
}

/* This returns a new age and allocated blocks in ei */
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
						block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t f_size = i_size_read(inode);
	unsigned long long cur_blocks =
				atomic64_read(&sbi->allocated_data_blocks);
	struct extent_info tei = *ei;	/* only fofs and len are valid */

	/*
	 * When I/O is not aligned to PAGE_SIZE, updates hit the last
	 * file block even during sequential writes, so don't record an
	 * age for the file's new last block here.
	 */
	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
			blkaddr == NEW_ADDR)
		return -EINVAL;

	if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
		unsigned long long cur_age;

		if (cur_blocks >= tei.last_blocks)
			cur_age = cur_blocks - tei.last_blocks;
		else
			/* allocated_data_blocks overflow */
			cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;

		if (tei.age)
			ei->age = __calculate_block_age(sbi, cur_age, tei.age);
		else
			ei->age = cur_age;
		ei->last_blocks = cur_blocks;
		WARN_ON(ei->age > cur_blocks);
		return 0;
	}

	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);

	/* the data block was allocated for the first time */
	if (blkaddr == NEW_ADDR)
		goto out;

	if (__is_valid_data_blkaddr(blkaddr) &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EINVAL;
out:
	/*
	 * Initialize the block age to zero; this can happen when the
	 * block age extent was reclaimed due to memory pressure or a
	 * system reboot.
	 */
	ei->age = 0;
	ei->last_blocks = cur_blocks;
	return 0;
}

static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
{
	struct extent_info ei = {};

	if (!__may_extent_tree(dn->inode, type))
		return;

	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	ei.len = 1;

	if (type == EX_READ) {
		if (dn->data_blkaddr == NEW_ADDR)
			ei.blk = NULL_ADDR;
		else
			ei.blk = dn->data_blkaddr;
	} else if (type == EX_BLOCK_AGE) {
		if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
			return;
	}
	__update_extent_tree_range(dn->inode, &ei, type);
}

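/*
 * Shrinking runs in two phases: first free whole "zombie" trees
 * (trees of evicted inodes that were kept because they still held
 * nodes), then reclaim single nodes from the head of the global LRU
 * list.  Both phases use trylock variants, so the shrinker backs off
 * rather than stalling hot lookup/update paths.
 */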
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
					enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!atomic_read(&eti->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et,
					nr_shrink - node_cnt - tree_cnt);
			write_unlock(&et->lock);
		}

		if (atomic_read(&et->node_cnt))
			goto unlock_out;

		list_del_init(&et->list);
		radix_tree_delete(&eti->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&eti->total_ext_tree);
		atomic_dec(&eti->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&eti->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&eti->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&eti->extent_list))
			break;
		en = list_first_entry(&eti->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &eti->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&eti->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&eti->extent_lock);
	}
	spin_unlock(&eti->extent_lock);

unlock_out:
	mutex_unlock(&eti->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);

	return node_cnt + tree_cnt;
}

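/*
 * Typical read-path usage of the wrappers below (a sketch, not a
 * verbatim caller; the real callers live in f2fs's data and GC paths):
 *
 *	block_t blkaddr;
 *
 *	if (f2fs_lookup_read_extent_cache_block(inode, index, &blkaddr))
 *		return blkaddr;
 *
 * On a miss, the caller resolves the address through the dnode lookup
 * path instead and repopulates the cache via
 * f2fs_update_read_extent_cache().
 */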
/* read extent cache operations */
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_READ))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
}

bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
				block_t *blkaddr)
{
	struct extent_info ei = {};

	if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
		return false;
	*blkaddr = ei.blk + index - ei.fofs;
	return true;
}

void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_READ);
}

void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
		.blk = blkaddr,
	};

	if (!__may_extent_tree(dn->inode, EX_READ))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_READ);
}

unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
}

/* block age extent cache operations */
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_BLOCK_AGE))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
}

void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_BLOCK_AGE);
}

void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
	};

	if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
}

unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, AGE_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
}

void f2fs_destroy_extent_node(struct inode *inode)
{
	__destroy_extent_node(inode, EX_READ);
	__destroy_extent_node(inode, EX_BLOCK_AGE);
}

static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	bool updated = false;

	if (!__may_extent_tree(inode, type))
		return;

	write_lock(&et->lock);
	if (type == EX_READ) {
		set_inode_flag(inode, FI_NO_EXTENT);
		if (et->largest.len) {
			et->largest.len = 0;
			updated = true;
		}
	}
	write_unlock(&et->lock);

	__destroy_extent_node(inode, type);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	__drop_extent_tree(inode, EX_READ);
	__drop_extent_tree(inode, EX_BLOCK_AGE);
}

static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&eti->extent_tree_lock);
		list_add_tail(&et->list, &eti->zombie_list);
		atomic_inc(&eti->total_zombie_tree);
		mutex_unlock(&eti->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = __destroy_extent_node(inode, type);

	/* delete extent tree entry in radix tree */
	mutex_lock(&eti->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&eti->total_ext_tree);
	mutex_unlock(&eti->extent_tree_lock);

	F2FS_I(inode)->extent_tree[type] = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	__destroy_extent_tree(inode, EX_READ);
	__destroy_extent_tree(inode, EX_BLOCK_AGE);
}

static void __init_extent_tree_info(struct extent_tree_info *eti)
{
	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
	mutex_init(&eti->extent_tree_lock);
	INIT_LIST_HEAD(&eti->extent_list);
	spin_lock_init(&eti->extent_lock);
	atomic_set(&eti->total_ext_tree, 0);
	INIT_LIST_HEAD(&eti->zombie_list);
	atomic_set(&eti->total_zombie_tree, 0);
	atomic_set(&eti->total_ext_node, 0);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
	__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);

	/* initialize for block age extents */
	atomic64_set(&sbi->allocated_data_blocks, 0);
	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
	sbi->last_age_weight = LAST_AGE_WEIGHT;
	sbi->max_read_extent_count = DEF_MAX_READ_EXTENT_COUNT;
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}