// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 * the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

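/*
 * All mounted f2fs instances are linked on f2fs_list under f2fs_list_lock
 * so one global shrinker can walk them.  shrinker_run_no tags each scan
 * pass and is used to detect when a walk has wrapped around the list.
 */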
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

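/* Clean NAT entries can simply be dropped; dirty ones still await writeback. */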
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}

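/* Only free nids beyond the MAX_FREE_NIDS reserve are surplus to reclaim. */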
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

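/* Zombie extent trees (their inodes are gone) plus all cached extent nodes. */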
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

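/*
 * ->count_objects callback: estimate how many cache objects are reclaimable
 * across every mounted instance.  Instances in the middle of unmounting are
 * skipped via the umount_mutex trylock.
 */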
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
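		/*
		 * Holding umount_mutex pins this sbi, so the list lock can
		 * be dropped while its caches are inspected.
		 */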
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

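/*
 * ->scan_objects callback: walk the instances round-robin and try to free
 * up to sc->nr_to_scan objects, giving half of the budget to the extent
 * cache before falling back to NAT entries and free nids.
 */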
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
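	/*
	 * Take the next non-zero run number; zero is reserved so that a
	 * freshly mounted sbi (shrinker_run_no == 0) is never mistaken
	 * for one already visited in this pass.
	 */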
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
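		/* Rotate this sb to the tail so the next pass starts elsewhere. */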
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

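/* Called at mount time: make this instance visible to the global shrinker. */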
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

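/*
 * Called during umount: drop the entire extent cache for this instance,
 * then unlink it so the shrinker can no longer see it.
 */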
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}
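
/*
 * For reference, a minimal sketch of how these two callbacks get wired into
 * the VM.  In mainline the registration lives in fs/f2fs/super.c, and the
 * exact register_shrinker() signature varies across kernel versions, so
 * treat this as an illustrative assumption rather than a verbatim excerpt:
 *
 *	static struct shrinker f2fs_shrinker_info = {
 *		.scan_objects	= f2fs_shrink_scan,
 *		.count_objects	= f2fs_shrink_count,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&f2fs_shrinker_info);    (at module init)
 *	unregister_shrinker(&f2fs_shrinker_info);  (at module exit)
 */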