/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false);
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently, because
		 * some segments can be invalidated soon after by user update
		 * or deletion, so it is better to wait some time to collect
		 * more dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

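/*
 * Pick the victim selection policy: cost-benefit for background GC and
 * greedy for foreground GC by default. The gc_idle knob overrides this
 * choice: 1 forces cost-benefit, 2 forces greedy.
 */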
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

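/*
 * Cost-benefit policy (from the classic LFS cleaner): compute the section
 * utilization u (0..100) and a normalized age (0..100), then return
 * UINT_MAX - (100 * (100 - u) * age) / (100 + u), so a lower value means
 * more free space reclaimed per copied block from a colder section.
 */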
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address got in summary with that in NAT.
 * If the node is valid, move it with cold status; otherwise (an invalid
 * node) ignore it.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller should pass only node offsets that indicate direct
 * node blocks. Passing an offset that points to another node block type,
 * such as an indirect or double indirect node block, is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

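/*
 * Check whether the data block recorded in the summary entry is still live:
 * load its owner node page, verify the node version against the NAT, and
 * make sure the node still points back at @blkaddr.
 */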
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

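/*
 * Move an encrypted data block without decrypting it: the raw ciphertext is
 * read into a page of the meta inode's mapping and written out to the newly
 * allocated address, bypassing the normal (decrypting) data path.
 */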
static void move_encrypted_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
						&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

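/*
 * Migrate one plaintext data block. Under BG_GC the page is only marked
 * dirty and cold so that the flusher moves it later; under FG_GC it is
 * written out synchronously, retrying on transient -ENOMEM.
 */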
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block, and
 * checks the data block's validity. If the block is valid, it copies it
 * with cold status and modifies the parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino after checking its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* for encrypted inodes, defer the move to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type, segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

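/*
 * Collect one victim section: reference all of its summary pages, then walk
 * each segment and migrate the live node or data blocks. Returns 1 when
 * FG_GC managed to free the whole section, 0 otherwise.
 */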
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}

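/*
 * Entry point for garbage collection. The caller must hold sbi->gc_mutex;
 * it is released here. In sync mode the return value reports whether at
 * least one section was actually freed.
 */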
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but still not
		 * enough free sections, we should flush dent/node blocks and do
		 * garbage collections.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0, 0)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
	} else if (gc_type == BG_GC && !background) {
		/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
		goto stop;
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count, blocks_per_sec;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

	sbi->fggc_threshold = div_u64((main_count - ovp_count) * blocks_per_sec,
					(main_count - resv_count));
}
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently, because
		 * some segments can be invalidated soon after by user update
		 * or deletion, so it is better to wait some time to collect
		 * more dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

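/*
 * Pick the selection policy: ATGC (when enabled) or cost-benefit for
 * background GC, greedy for foreground GC. A non-default sbi->gc_mode
 * (idle or urgent tuning) overrides that choice.
 */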
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidate range: we should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
	else if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab,
				GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

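/*
 * Age-threshold GC: walk up to dirty_threshold candidates in mtime order
 * and score each one by a weighted sum of normalized age and free-block
 * ratio; the entry with the lowest cost (oldest, emptiest section) wins.
 */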
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

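/*
 * Pinned-file handling: blocks of pinned files must not be migrated by GC.
 * During FG_GC the section holding a pinned file is recorded in
 * pinned_secmap so that subsequent victim searches skip it.
 */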
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting the invalid segno (that is failed due to block
		 * validity check failure during GC) to avoid endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address got in summary with that in NAT.
 * If the node is valid, move it with cold status; otherwise (an invalid
 * node) ignore it.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

1050/*
1051 * Calculate start block index indicating the given node offset.
1052 * Be careful, caller should give this node offset only indicating direct node
1053 * blocks. If any node offsets, which point the other types of node blocks such
1054 * as indirect or double indirect node blocks, are given, it must be a caller's
1055 * bug.
1056 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

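/*
 * Worked example for f2fs_start_bidx_of_node(), assuming the default 4KB
 * layout (NIDS_PER_BLOCK == 1018, ADDRS_PER_INODE == 923 and
 * ADDRS_PER_BLOCK == 1018): node_ofs 1 is the first direct node, so
 * bidx = 0 and the start index is 923; node_ofs 4 is the first direct
 * node behind the first indirect node (node_ofs 3), so bidx = 2 and the
 * start index is 2 * 1018 + 923 = 2959.
 */

/*
 * Check that @blkaddr is still owned by the node recorded in the summary
 * entry: look up the node page, validate its NAT info, and compare the
 * block address stored in the node against @blkaddr. On success, the
 * owner's node info is returned in @dni and its node offset in @nofs.
 */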
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

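/*
 * Read the data block at @index of @inode into the meta-inode page cache
 * so that a later move_data_block() finds it up to date. Used as readahead
 * for post-read (encrypted/compressed) inodes during GC.
 */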
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, };
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * Don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This moves the block (i.e. its LBA) directly on disk, without rewriting
 * it through the owner inode's address space.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
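	/*
	 * Under background GC with ATGC enabled (and GC mode not urgent),
	 * relocate the block into the age-aware ATGC log; otherwise use the
	 * cold data log.
	 */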
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* grab the page without reading it in */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * Don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);
	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function finds the parent node of a victim data block and checks
 * whether the block is still valid. If it is, the block is copied with
 * cold status and the parent node is updated accordingly.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

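	/*
	 * Five passes over the summary entries: phase 0 readaheads NAT
	 * blocks, phase 1 readaheads the dnode pages, phase 2 readaheads
	 * the owner inodes' node pages, phase 3 grabs the inodes and
	 * readaheads their data pages, and phase 4 actually moves the
	 * data blocks.
	 */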
next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment became fully valid due to a
		 * race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino, checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all in-flight AIO/DIO to drain */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * On zoned devices, the zone capacity can be smaller than the zone
	 * size, leaving fewer usable segments in the zone than expected, so
	 * recalculate the end segno of the range that can actually be
	 * garbage collected.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple contiguous SSA blocks */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* grab a reference on every summary page in the section */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
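				/*
				 * put twice: once for the find_get_page()
				 * reference above, and once for the reference
				 * left by f2fs_get_sum_page().
				 */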
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find the segment summary of the victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can free them by writing a checkpoint.
		 * That secures free segments which no longer need FG_GC.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks);
	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (gc_control->init_gc_type == FG_GC ||
	    !has_not_enough_free_secs(sbi,
				(gc_type == FG_GC) ? sec_freed : 0, 0)) {
		if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
			goto go_gc_more;
		goto stop;
	}

	/*
	 * Stop FG_GC when too many rounds made no progress: if i_gc_rwsem
	 * was contended in more than MAX_SKIP_GC_COUNT rounds and in at
	 * least half of all rounds, write a checkpoint instead.
	 */
	if (gc_type == FG_GC) {
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	}

	/* Write a checkpoint to reclaim prefree segments */
	if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
				prefree_segments(sbi)) {
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

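/*
 * Initialize ATGC (age-threshold GC) management. ATGC is enabled only
 * when the mount option is set and the filesystem has already aged past
 * the default age threshold.
 */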
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
	    SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* assign warm/cold data areas starting from the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

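/*
 * Evacuate every valid block from the last @secs sections of the main
 * area so they can be removed by resize. With @gc_only, only the GC
 * passes run; otherwise a checkpoint is written and the freed range is
 * verified to contain no in-use segments.
 */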
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}

int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* the new fs size must be aligned to the section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock))
		return -EAGAIN;

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

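	/*
	 * First pass (gc_only): migrate valid blocks out of the range being
	 * shrunk, without committing any metadata change yet.
	 */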
	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
	if (err)
		return err;

	freeze_super(sbi->sb);
	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

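	/*
	 * Second pass, with the fs frozen: the range must now be completely
	 * freed, then the superblock and checkpoint are rewritten to commit
	 * the new size.
	 */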
	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	return err;
}