/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

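/*
 * Fast path: check whether @ofs falls inside the cached entry before
 * walking the whole tree.
 */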
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

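/* Slow path: binary search the rb-tree for the entry covering @ofs. */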
static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

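/* Try the cached entry first and fall back to a full tree walk on miss. */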
struct rb_entry *__lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}

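/*
 * Find the link and parent for inserting a new entry at @ofs; hitting an
 * existing entry that already covers @ofs is treated as a bug, since
 * callers must have removed or split any conflicting extents first.
 */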
struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs)
{
	struct rb_node **p = &root->rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			p = &(*p)->rb_left;
		else if (ofs >= re->ofs + re->len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}

	return p;
}

/*
 * Look up the rb entry covering @ofs in the rb-tree;
 * if hit, return the entry, otherwise return NULL.
 * @prev_entry: entry before @ofs
 * @next_entry: entry after @ofs
 * @insert_p: insert point for a new entry at @ofs,
 * recorded to simplify the insertion afterwards;
 * the tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force)
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}

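/*
 * Debug check (CONFIG_F2FS_CHECK_FS only): walk the tree in order and
 * verify that no entry overlaps its successor.
 */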
bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

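/*
 * Allocate an extent node for @ei and link it at the given tree position;
 * returns NULL if the GFP_ATOMIC allocation fails.
 */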
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}

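/*
 * Return the inode's extent tree, creating a fresh one or reviving a
 * zombie from sbi->zombie_list under extent_tree_lock.
 */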
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

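/*
 * Invalidate the cached largest extent if it overlaps [fofs, fofs + len)
 * and mark the inode dirty so the on-disk copy gets updated.
 */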
static void __drop_largest_extent(struct inode *inode,
					pgoff_t fofs, unsigned int len)
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

	if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
		largest->len = 0;
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

/* return true if the inode page has been changed */
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return false;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	return false;
}

bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	bool ret = __f2fs_init_extent_tree(inode, i_ext);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);

	return ret;
}

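/*
 * Look up @pgofs in the inode's extent cache: check the cached largest
 * extent first, then the rb-tree; on a hit, copy the extent into @ei and
 * refresh the node's position in the global LRU list.
 */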
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)__lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}

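/*
 * Try to merge @ei with its neighbors: extend @prev_ex over @ei and/or
 * pull the front of @next_ex back over it; when both merges apply,
 * @prev_ex is released into @next_ex. Returns the merged node, or NULL
 * if no merge was possible.
 */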
static struct extent_node *__try_merge_extent_node(struct inode *inode,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(inode, et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}

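/*
 * Insert @ei as a new node, reusing the insert position recorded by
 * __lookup_rb_tree_ret() when available to avoid a second tree walk.
 */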
static struct extent_node *__insert_extent_tree(struct inode *inode,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;

	__try_update_largest_extent(inode, et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}

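/*
 * Update the extent cache for [fofs, fofs + len): split or invalidate any
 * overlapping extents, then, if @blkaddr is valid, merge or insert the new
 * mapping. Gives up on the cache entirely if updates stay too fragmented.
 */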
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop the largest extent before lookup, in case it has already
	 * been shrunk from the extent tree
	 */
	__drop_largest_extent(inode, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(inode, et, &ei,
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(inode, et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if the original extent is split into zero or two parts,
		 * the tree has been altered by deletion or insertion, so
		 * invalidate the pointers that refer into the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
			__insert_extent_tree(inode, et, &ei,
						insert_p, insert_parent);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			__drop_largest_extent(inode, 0, UINT_MAX);
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	write_unlock(&et->lock);
}

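/*
 * Shrinker entry point: reclaim up to @nr_shrink objects by freeing
 * unreferenced zombie trees first, then evicting the coldest extent
 * nodes from the global LRU list.
 */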
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

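/*
 * Disable the extent cache for this inode: set FI_NO_EXTENT, free all of
 * its extent nodes and invalidate the cached largest extent.
 */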
void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	if (!f2fs_may_extent_tree(inode))
		return;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	__drop_largest_extent(inode, 0, UINT_MAX);
	write_unlock(&et->lock);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}