// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon after by a
		 * user update or deletion, so we'd like to wait some time to
		 * collect more dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

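/*
 * Create the per-filesystem background GC thread.  It is named
 * "f2fs_gc-major:minor" after the underlying block device and runs
 * gc_thread_func() until f2fs_stop_gc_thread() is called; the thread
 * adapts its own sleep time between min_sleep_time and max_sleep_time.
 */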
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

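/*
 * Map (gc_type, sysfs gc_mode) to a victim selection algorithm:
 * background GC prefers age-threshold GC (GC_AT) when ATGC is enabled
 * and falls back to cost-benefit (GC_CB), while foreground GC uses
 * greedy selection.  A user-set gc_mode (GC_IDLE_* or GC_URGENT_HIGH)
 * overrides that default.
 */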
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the range of candidates: all dirty segments should be
	 * selected in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

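/*
 * Upper bound used to initialize p->min_cost before a scan.  For greedy
 * selection the cost is a valid-block count, so twice the block count of
 * one allocation unit is a safe "worse than anything" start; cost-benefit
 * and age-based modes compute costs that saturate toward UINT_MAX instead.
 */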
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim segments
	 * chosen by background GC before.  Those segments are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

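/*
 * Cost-benefit value of a section, following the classic LFS cleaning
 * policy: with utilization u (percent of valid blocks) and age normalized
 * to [0, 100], the benefit-to-cost ratio is (1 - u) * age / (1 + u).
 * Since victim selection minimizes cost, the ratio is inverted below by
 * returning UINT_MAX - (100 * (100 - u) * age) / (100 + u).
 */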
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the system time has been changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

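/*
 * ATGC keeps candidate victims in an rb-tree ordered by mtime (oldest
 * leftmost), plus a linked list so all entries can be released in bulk
 * by release_victim_entry().  Entries only live for the duration of one
 * victim search.
 */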
static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* handle the case where the system time has been changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

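/*
 * Age-threshold GC lookup: walk candidates from the oldest mtime and
 * score each one by a weighted sum of its normalized age (age_weight
 * percent) and its invalid block ratio (the remaining percent), both
 * scaled by 'accu' for fixed-point accuracy.  The smallest cost, i.e.
 * the largest weighted sum, wins; at most dirty_threshold candidates
 * are examined.
 */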
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * Select candidates around the source section, in the range
 * [target - dirty_threshold, target + dirty_threshold].
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

628
629/*
630 * This function is called from two paths.
631 * One is garbage collection and the other is SSR segment selection.
632 * When it is called during GC, it just gets a victim segment
633 * and it does not remove it from dirty seglist.
634 * When it is called from SSR segment selection, it finds a segment
635 * which has minimum valid blocks and removes it from dirty seglist.
636 */
637static int get_victim_by_default(struct f2fs_sb_info *sbi,
638 unsigned int *result, int gc_type, int type,
639 char alloc_mode, unsigned long long age)
640{
641 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
642 struct sit_info *sm = SIT_I(sbi);
643 struct victim_sel_policy p;
644 unsigned int secno, last_victim;
645 unsigned int last_segment;
646 unsigned int nsearched;
647 bool is_atgc;
648 int ret = 0;
649
650 mutex_lock(&dirty_i->seglist_lock);
651 last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
652
653 p.alloc_mode = alloc_mode;
654 p.age = age;
655 p.age_threshold = sbi->am.age_threshold;
656
657retry:
658 select_policy(sbi, gc_type, type, &p);
659 p.min_segno = NULL_SEGNO;
660 p.oldest_age = 0;
661 p.min_cost = get_max_cost(sbi, &p);
662
663 is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
664 nsearched = 0;
665
666 if (is_atgc)
667 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
668
669 if (*result != NULL_SEGNO) {
670 if (!get_valid_blocks(sbi, *result, false)) {
671 ret = -ENODATA;
672 goto out;
673 }
674
675 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
676 ret = -EBUSY;
677 else
678 p.min_segno = *result;
679 goto out;
680 }
681
682 ret = -ENODATA;
683 if (p.max_search == 0)
684 goto out;
685
686 if (__is_large_section(sbi) && p.alloc_mode == LFS) {
687 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
688 p.min_segno = sbi->next_victim_seg[BG_GC];
689 *result = p.min_segno;
690 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
691 goto got_result;
692 }
693 if (gc_type == FG_GC &&
694 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
695 p.min_segno = sbi->next_victim_seg[FG_GC];
696 *result = p.min_segno;
697 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
698 goto got_result;
699 }
700 }
701
702 last_victim = sm->last_victim[p.gc_mode];
703 if (p.alloc_mode == LFS && gc_type == FG_GC) {
704 p.min_segno = check_bg_victims(sbi);
705 if (p.min_segno != NULL_SEGNO)
706 goto got_it;
707 }
708
709 while (1) {
710 unsigned long cost, *dirty_bitmap;
711 unsigned int unit_no, segno;
712
713 dirty_bitmap = p.dirty_bitmap;
714 unit_no = find_next_bit(dirty_bitmap,
715 last_segment / p.ofs_unit,
716 p.offset / p.ofs_unit);
717 segno = unit_no * p.ofs_unit;
718 if (segno >= last_segment) {
719 if (sm->last_victim[p.gc_mode]) {
720 last_segment =
721 sm->last_victim[p.gc_mode];
722 sm->last_victim[p.gc_mode] = 0;
723 p.offset = 0;
724 continue;
725 }
726 break;
727 }
728
729 p.offset = segno + p.ofs_unit;
730 nsearched++;
731
732#ifdef CONFIG_F2FS_CHECK_FS
733 /*
734 * skip selecting the invalid segno (that is failed due to block
735 * validity check failure during GC) to avoid endless GC loop in
736 * such cases.
737 */
738 if (test_bit(segno, sm->invalid_segmap))
739 goto next;
740#endif
741
742 secno = GET_SEC_FROM_SEG(sbi, segno);
743
744 if (sec_usage_check(sbi, secno))
745 goto next;
746
747 /* Don't touch checkpointed data */
748 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
749 if (p.alloc_mode == LFS) {
750 /*
751 * LFS is set to find source section during GC.
752 * The victim should have no checkpointed data.
753 */
754 if (get_ckpt_valid_blocks(sbi, segno, true))
755 goto next;
756 } else {
757 /*
758 * SSR | AT_SSR are set to find target segment
759 * for writes which can be full by checkpointed
760 * and newly written blocks.
761 */
762 if (!f2fs_segment_has_free_slot(sbi, segno))
763 goto next;
764 }
765 }
766
767 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
768 goto next;
769
770 if (is_atgc) {
771 add_victim_entry(sbi, &p, segno);
772 goto next;
773 }
774
775 cost = get_gc_cost(sbi, segno, &p);
776
777 if (p.min_cost > cost) {
778 p.min_segno = segno;
779 p.min_cost = cost;
780 }
781next:
782 if (nsearched >= p.max_search) {
783 if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
784 sm->last_victim[p.gc_mode] =
785 last_victim + p.ofs_unit;
786 else
787 sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
788 sm->last_victim[p.gc_mode] %=
789 (MAIN_SECS(sbi) * sbi->segs_per_sec);
790 break;
791 }
792 }
793
794 /* get victim for GC_AT/AT_SSR */
795 if (is_atgc) {
796 lookup_victim_by_age(sbi, &p);
797 release_victim_entry(sbi);
798 }
799
800 if (is_atgc && p.min_segno == NULL_SEGNO &&
801 sm->elapsed_time < p.age_threshold) {
802 p.age_threshold = 0;
803 goto retry;
804 }
805
806 if (p.min_segno != NULL_SEGNO) {
807got_it:
808 *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
809got_result:
810 if (p.alloc_mode == LFS) {
811 secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
812 if (gc_type == FG_GC)
813 sbi->cur_victim_sec = secno;
814 else
815 set_bit(secno, dirty_i->victim_secmap);
816 }
817 ret = 0;
818
819 }
820out:
821 if (p.min_segno != NULL_SEGNO)
822 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
823 sbi->cur_victim_sec,
824 prefree_segments(sbi), free_segments(sbi));
825 mutex_unlock(&dirty_i->seglist_lock);
826
827 return ret;
828}
829
830static const struct victim_selection default_v_ops = {
831 .get_victim = get_victim_by_default,
832};
833
834static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
835{
836 struct inode_entry *ie;
837
838 ie = radix_tree_lookup(&gc_list->iroot, ino);
839 if (ie)
840 return ie->inode;
841 return NULL;
842}
843
844static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
845{
846 struct inode_entry *new_ie;
847
848 if (inode == find_gc_inode(gc_list, inode->i_ino)) {
849 iput(inode);
850 return;
851 }
852 new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
853 new_ie->inode = inode;
854
855 f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
856 list_add_tail(&new_ie->list, &gc_list->ilist);
857}
858
859static void put_gc_inode(struct gc_inode_list *gc_list)
860{
861 struct inode_entry *ie, *next_ie;
862
863 list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
864 radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
865 iput(ie->inode);
866 list_del(&ie->list);
867 kmem_cache_free(f2fs_inode_entry_slab, ie);
868 }
869}
870
871static int check_valid_map(struct f2fs_sb_info *sbi,
872 unsigned int segno, int offset)
873{
874 struct sit_info *sit_i = SIT_I(sbi);
875 struct seg_entry *sentry;
876 int ret;
877
878 down_read(&sit_i->sentry_lock);
879 sentry = get_seg_entry(sbi, segno);
880 ret = f2fs_test_bit(offset, sentry->cur_valid_map);
881 up_read(&sit_i->sentry_lock);
882 return ret;
883}
884
885/*
886 * This function compares node address got in summary with that in NAT.
887 * On validity, copy that node with cold status, otherwise (invalid node)
888 * ignore that.
889 */
890static int gc_node_segment(struct f2fs_sb_info *sbi,
891 struct f2fs_summary *sum, unsigned int segno, int gc_type)
892{
893 struct f2fs_summary *entry;
894 block_t start_addr;
895 int off;
896 int phase = 0;
897 bool fggc = (gc_type == FG_GC);
898 int submitted = 0;
899 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
900
901 start_addr = START_BLOCK(sbi, segno);
902
903next_step:
904 entry = sum;
905
906 if (fggc && phase == 2)
907 atomic_inc(&sbi->wb_sync_req[NODE]);
908
909 for (off = 0; off < usable_blks_in_seg; off++, entry++) {
910 nid_t nid = le32_to_cpu(entry->nid);
911 struct page *node_page;
912 struct node_info ni;
913 int err;
914
915 /* stop BG_GC if there is not enough free sections. */
916 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
917 return submitted;
918
919 if (check_valid_map(sbi, segno, off) == 0)
920 continue;
921
922 if (phase == 0) {
923 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
924 META_NAT, true);
925 continue;
926 }
927
928 if (phase == 1) {
929 f2fs_ra_node_page(sbi, nid);
930 continue;
931 }
932
933 /* phase == 2 */
934 node_page = f2fs_get_node_page(sbi, nid);
935 if (IS_ERR(node_page))
936 continue;
937
938 /* block may become invalid during f2fs_get_node_page */
939 if (check_valid_map(sbi, segno, off) == 0) {
940 f2fs_put_page(node_page, 1);
941 continue;
942 }
943
944 if (f2fs_get_node_info(sbi, nid, &ni)) {
945 f2fs_put_page(node_page, 1);
946 continue;
947 }
948
949 if (ni.blk_addr != start_addr + off) {
950 f2fs_put_page(node_page, 1);
951 continue;
952 }
953
954 err = f2fs_move_node_page(node_page, gc_type);
955 if (!err && gc_type == FG_GC)
956 submitted++;
957 stat_inc_node_blk_count(sbi, 1, gc_type);
958 }
959
960 if (++phase < 3)
961 goto next_step;
962
963 if (fggc)
964 atomic_dec(&sbi->wb_sync_req[NODE]);
965 return submitted;
966}
967
968/*
969 * Calculate start block index indicating the given node offset.
970 * Be careful, caller should give this node offset only indicating direct node
971 * blocks. If any node offsets, which point the other types of node blocks such
972 * as indirect or double indirect node blocks, are given, it must be a caller's
973 * bug.
974 */
975block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
976{
977 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
978 unsigned int bidx;
979
980 if (node_ofs == 0)
981 return 0;
982
983 if (node_ofs <= 2) {
984 bidx = node_ofs - 1;
985 } else if (node_ofs <= indirect_blks) {
986 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
987
988 bidx = node_ofs - 2 - dec;
989 } else {
990 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
991
992 bidx = node_ofs - 5 - dec;
993 }
994 return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
995}
996
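/*
 * Check whether the data block is still referenced by its owning node:
 * compare the block address recorded in the node page with the address
 * being garbage collected, so stale summary entries are skipped.  On
 * success the node's offset is returned through @nofs for
 * f2fs_start_bidx_of_node().
 */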
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				f2fs_bug_on(sbi, 1);
			}
		}
#endif
		return false;
	}
	return true;
}

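/*
 * Read ahead one data block into META_MAPPING so that a following
 * move_data_block() can copy it without going through the inode's page
 * cache.  This path is used for inodes that need post-read processing
 * (e.g. encryption or compression), whose blocks are migrated through
 * the meta inode.
 */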
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data have been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data have been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);
	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

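/*
 * Migrate one data block through the inode's page cache.  Background GC
 * only redirties the page and tags it with the GC flag so that regular
 * writeback moves it later; foreground GC writes the page out
 * synchronously right away.
 */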
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity.  If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment has become fully valid due to
		 * a race condition along with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

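/*
 * Collect one section: pin the summary blocks of all its segments first,
 * then migrate the live node or data blocks segment by segment.  Returns
 * the number of segments freed; the caller compares this against the
 * usable segments of the section to decide whether a whole section was
 * reclaimed.
 */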
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * The zone capacity can be less than the zone size in zoned devices,
	 * resulting in fewer usable segments in the zone than expected, so
	 * calculate the end segno in the zone that can be garbage collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find the segment summary of the victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

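/*
 * Main entry point of garbage collection: synchronous callers run FG_GC
 * until a victim is processed, while background callers run BG_GC, which
 * is upgraded to FG_GC when free sections run low.  The caller must hold
 * sbi->gc_lock; it is released here before returning.
 */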
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, bool force, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint.  Then we secure free segments which don't need
		 * FG_GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret)
		goto stop;

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
	if (gc_type == FG_GC &&
			seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
						sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	if (!victim_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give the warm/cold data area from the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

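/*
 * Resize helper: evacuate all valid blocks from the last @secs sections.
 * MAIN_SECS() is shrunk temporarily so that the allocator cannot place
 * new blocks in the range being removed while GC moves the old ones out.
 */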
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	up_write(&sbi->sb_lock);
}

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}

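/*
 * Shrink the filesystem to @block_count blocks.  The new size must be
 * section-aligned and smaller than the current size; after a trial GC
 * pass, the superblock and checkpoint are rewritten under freeze_super(),
 * and any failure marks the filesystem for fsck repair.
 */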
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!down_write_trylock(&sbi->gc_lock))
		return -EAGAIN;

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);
	if (err)
		return err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	freeze_super(sbi->sb);
	down_write(&sbi->gc_lock);
	down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	up_write(&sbi->cp_global_sem);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	return err;
}