// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"

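/*
 * Pop a page from the caller's local page pool if one is available;
 * otherwise fall back to alloc_page(). Pooled pages are singly chained
 * through their page_private() field, so no extra allocation is needed
 * to keep the list.
 */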
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
	struct page *page = *pagepool;

	if (page) {
		DBG_BUGON(page_ref_count(page) != 1);
		*pagepool = (struct page *)page_private(page);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}

void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		put_page(page);
	}
}

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

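/*
 * Take a reference on a workgroup. The fast path is a plain
 * lockref_get_not_zero(); if the count has already dropped to zero,
 * revive the workgroup under lockref.lock (unless it has been marked
 * dead) and pull it back out of the global shrink count.
 */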
static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
	if (lockref_get_not_zero(&grp->lockref))
		return true;

	spin_lock(&grp->lockref.lock);
	if (__lockref_is_dead(&grp->lockref)) {
		spin_unlock(&grp->lockref.lock);
		return false;
	}

	if (!grp->lockref.count++)
		atomic_long_dec(&erofs_global_shrink_cnt);
	spin_unlock(&grp->lockref.lock);
	return true;
}

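/*
 * Look up a workgroup by index in the per-superblock managed_pslots
 * XArray under RCU. If the reference cannot be taken because the group
 * is dying, drop the RCU read lock and retry until a stable result
 * (a referenced group or NULL) is obtained.
 */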
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (!erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}

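/*
 * Insert a new workgroup into managed_pslots. If another thread has
 * already inserted one at the same index, take a reference on the
 * in-tree group and return it instead (retrying if that group is on
 * its way out); on XArray allocation failure an ERR_PTR() is returned.
 */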
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	DBG_BUGON(grp->lockref.count < 1);
repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_KERNEL);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (!erofs_workgroup_get(pre)) {
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}

static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}

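/*
 * Drop a reference. When the last reference is dropped, the count
 * reaches zero and the workgroup is accounted in the global shrink
 * count; the actual removal and freeing is left to the shrinker via
 * erofs_try_to_release_workgroup().
 */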
void erofs_workgroup_put(struct erofs_workgroup *grp)
{
	if (lockref_put_or_lock(&grp->lockref))
		return;

	DBG_BUGON(__lockref_is_dead(&grp->lockref));
	if (grp->lockref.count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	--grp->lockref.count;
	spin_unlock(&grp->lockref.lock);
}

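/*
 * Try to release one workgroup: only a zero-referenced group whose
 * cached pages can all be freed is erased from managed_pslots, marked
 * dead and handed to RCU for freeing. Returns true if the group was
 * released.
 */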
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	int free = false;

	spin_lock(&grp->lockref.lock);
	if (grp->lockref.count)
		goto out;

	/*
	 * Note that all cached pages should be detached before being
	 * deleted from the XArray. Otherwise some cached pages could
	 * still be attached to the orphan old workgroup when the new
	 * one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp))
		goto out;

	/*
	 * It's impossible to fail after the workgroup is frozen,
	 * however in order to avoid some race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);

	lockref_mark_dead(&grp->lockref);
	free = true;
out:
	spin_unlock(&grp->lockref.lock);
	if (free)
		__erofs_workgroup_free(grp);
	return free;
}

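/*
 * Walk managed_pslots and release up to nr_shrink idle workgroups.
 * The XArray lock is dropped after each successful release and
 * re-taken before the iteration continues, so it is never held across
 * the whole walk.
 */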
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_lock(&sbi->managed_pslots);
	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;
		xa_unlock(&sbi->managed_pslots);

		++freed;
		if (!--nr_shrink)
			return freed;
		xa_lock(&sbi->managed_pslots);
	}
	xa_unlock(&sbi->managed_pslots);
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

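/*
 * Scan callback of the registered shrinker. Each invocation picks a
 * fresh non-zero run number; superblocks already visited in this run
 * are skipped, and each scanned superblock is moved to the tail of
 * erofs_sb_list so successive runs spread the reclaim work fairly.
 */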
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker *erofs_shrinker_info;

int __init erofs_init_shrinker(void)
{
	erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
	if (!erofs_shrinker_info)
		return -ENOMEM;

	erofs_shrinker_info->count_objects = erofs_shrink_count;
	erofs_shrinker_info->scan_objects = erofs_shrink_scan;

	shrinker_register(erofs_shrinker_info);

	return 0;
}

void erofs_exit_shrinker(void)
{
	shrinker_free(erofs_shrinker_info);
}
#endif	/* !CONFIG_EROFS_FS_ZIP */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/pagevec.h>

struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
{
	struct page *page;

	if (!list_empty(pool)) {
		page = lru_to_page(pool);
		DBG_BUGON(page_ref_count(page) != 1);
		list_del(&page->lru);
	} else {
		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
	}
	return page;
}

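/*
 * Statically allocated per-CPU scratch buffers (EROFS_PCPUBUF_NR_PAGES
 * pages per CPU). erofs_get_pcpubuf() disables preemption and returns
 * the requested page-sized slot of the current CPU's buffer; preemption
 * stays disabled while the buffer is in use and must be re-enabled by
 * the caller once it is done.
 */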
#if (EROFS_PCPUBUF_NR_PAGES > 0)
static struct {
	u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];

void *erofs_get_pcpubuf(unsigned int pagenr)
{
	preempt_disable();
	return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
}
#endif

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

#define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)
#define __erofs_workgroup_put(grp)	atomic_dec(&(grp)->refcount)

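/*
 * Take a reference on a workgroup in the older atomic-refcount scheme:
 * wait until the group is no longer frozen, then bump the refcount with
 * cmpxchg, retrying on races. A non-positive count means the group is
 * going away, so -1 is returned and the caller retries the lookup.
 */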
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);
	if (o <= 0)
		return -1;

	if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
		goto repeat;

	/* decrease refcount paired by erofs_workgroup_put */
	if (o == 1)
		atomic_long_dec(&erofs_global_shrink_cnt);
	return 0;
}

struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index, bool *tag)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = radix_tree_lookup(&sbi->workstn_tree, index);
	if (grp) {
		*tag = xa_pointer_tag(grp);
		grp = xa_untag_pointer(grp);

		if (erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}

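/*
 * Register a freshly initialized workgroup (its refcount must still be 1)
 * in the per-superblock radix tree. The reference count is bumped before
 * the group becomes visible so a concurrent lookup cannot free it, and is
 * dropped again if the insertion fails.
 */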
int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp,
			     bool tag)
{
	struct erofs_sb_info *sbi;
	int err;

	/* grp shouldn't be broken or used before */
	if (atomic_read(&grp->refcount) != 1) {
		DBG_BUGON(1);
		return -EINVAL;
	}

	err = radix_tree_preload(GFP_NOFS);
	if (err)
		return err;

	sbi = EROFS_SB(sb);
	xa_lock(&sbi->workstn_tree);

	grp = xa_tag_pointer(grp, tag);

	/*
	 * Bump up the reference count before making this workgroup
	 * visible to other users in order to avoid potential UAF
	 * without being serialized by workstn_lock.
	 */
	__erofs_workgroup_get(grp);

	err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
	if (err)
		/*
		 * it's safe to decrease since the workgroup isn't visible
		 * and refcount >= 2 (cannot be frozen).
		 */
		__erofs_workgroup_put(grp);

	xa_unlock(&sbi->workstn_tree);
	radix_tree_preload_end();
	return err;
}

static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}

int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count)
		__erofs_workgroup_free(grp);
	return count;
}

static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
{
	erofs_workgroup_unfreeze(grp, 0);
	__erofs_workgroup_free(grp);
}

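/*
 * Try to release one workgroup in the freeze-based scheme: freeze the
 * refcount at 1, free all of its cached pages, delete it from the radix
 * tree, and finally drop the last reference so it can be freed via RCU.
 * Returns false (with the group unfrozen) if the group is still busy.
 */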
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp,
					   bool cleanup)
{
	/*
	 * If managed cache is on, the refcount of workgroups
	 * themselves could be < 0 (frozen). In other words,
	 * there is no guarantee that all refcounts are > 0.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/*
	 * Note that all cached pages should be detached
	 * before being deleted from the radix tree. Otherwise some
	 * cached pages could still be attached to the orphan
	 * old workgroup when the new one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
		erofs_workgroup_unfreeze(grp, 1);
		return false;
	}

	/*
	 * It's impossible to fail after the workgroup is frozen,
	 * however in order to avoid some race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
						     grp->index)) != grp);

	/*
	 * If managed cache is on, the last refcount should indicate
	 * the related workstation.
	 */
	erofs_workgroup_unfreeze_final(grp);
	return true;
}

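/*
 * Shrink up to nr_shrink workgroups from the radix tree, scanning it in
 * PAGEVEC_SIZE-sized batches with radix_tree_gang_lookup() and resuming
 * from the last seen index after each batch until nothing is found or
 * the shrink target is reached.
 */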
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink,
					      bool cleanup)
{
	pgoff_t first_index = 0;
	void *batch[PAGEVEC_SIZE];
	unsigned int freed = 0;

	int i, found;
repeat:
	xa_lock(&sbi->workstn_tree);

	found = radix_tree_gang_lookup(&sbi->workstn_tree,
				       batch, first_index, PAGEVEC_SIZE);

	for (i = 0; i < found; ++i) {
		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);

		first_index = grp->index + 1;

		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
			continue;

		++freed;
		if (!--nr_shrink)
			break;
	}
	xa_unlock(&sbi->workstn_tree);

	if (i && nr_shrink)
		goto repeat;
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	erofs_shrink_workstation(sbi, ~0UL, true);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr, false);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info);
}

void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}
#endif	/* !CONFIG_EROFS_FS_ZIP */