/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
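/*
 * This file implements the callbacks through which the kernel's memory
 * shrinker reclaims f2fs's in-memory caches under memory pressure: extent
 * cache objects, clean NAT entries and surplus free nids.
 */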
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

/* all mounted f2fs instances, linked via sbi->s_list under f2fs_list_lock */
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;

	return count > 0 ? count : 0;
}

static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}
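/*
 * ->count_objects callback: report how many cached objects could be reclaimed
 * across every mounted f2fs instance.  Filesystems whose umount_mutex cannot
 * be taken are in the middle of being torn down and are skipped.
 */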
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
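/*
 * ->scan_objects callback: walk the mounted filesystems and ask each one to
 * drop up to sc->nr_to_scan objects, preferring the extent cache, then clean
 * NAT entries, then excess free nids.  Each visited sb is rotated to the tail
 * of f2fs_list so that successive passes spread the reclaim work around.
 */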
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/*
	 * Pick a non-zero id for this pass: sbi->shrinker_run_no is zero for
	 * filesystems that have never been scanned, so zero must not be used
	 * as a valid pass id.
	 */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* already scanned during this pass */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate this sb to the tail so the next pass starts elsewhere */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
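/*
 * f2fs_join_shrinker()/f2fs_leave_shrinker() are called when a filesystem is
 * mounted and unmounted, adding it to or removing it from the list that the
 * callbacks above iterate.
 */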
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}