// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/kthread.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;

void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	f2fs_build_fault_attr(sbi, 0, 0);
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	if (!end_io)
		f2fs_flush_merged_writes(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}

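/*
 * Read a meta page at @index through the meta inode's address space.
 * Unlike f2fs_grab_meta_page(), this can fail, so callers must check
 * the return value with IS_ERR().
 */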
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
		.is_por = !is_meta,
	};
	int err;

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
out:
	return page;
}

struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

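/*
 * Like f2fs_get_meta_page(), but retry up to DEFAULT_RETRY_IO_COUNT times
 * on -EIO before giving up and stopping checkpointing.
 */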
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	int count = 0;

retry:
	page = __get_meta_page(sbi, index, true);
	if (IS_ERR(page)) {
		if (PTR_ERR(page) == -EIO &&
				++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false);
	}
	return page;
}

/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

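/*
 * Cross-check a data block address against the current SIT bitmap; only
 * DATA_GENERIC_ENHANCE and DATA_GENERIC_ENHANCE_READ types are verified here.
 */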
static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
							int type)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	bool exist;

	if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
		return true;

	segno = GET_SEGNO(sbi, blkaddr);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);

	exist = f2fs_test_bit(offset, se->cur_valid_map);
	if (!exist && type == DATA_GENERIC_ENHANCE) {
		f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
			 blkaddr, exist);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		WARN_ON(1);
	}
	return exist;
}

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			return false;
		break;
	case DATA_GENERIC:
	case DATA_GENERIC_ENHANCE:
	case DATA_GENERIC_ENHANCE_READ:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))) {
			f2fs_warn(sbi, "access invalid blkaddr:%u",
				  blkaddr);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			WARN_ON(1);
			return false;
		} else {
			return __is_bitmap_valid(sbi, blkaddr, type);
		}
		break;
	case META_GENERIC:
		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
			blkaddr >= MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}

/*
 * Readahead CP/NAT/SIT/SSA/POR pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
		.is_por = (type == META_POR),
	};
	struct blk_plug plug;
	int err;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			if (unlikely(blkno >= TOTAL_SEGS(sbi)))
				goto out;
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		err = f2fs_submit_page_bio(&fio);
		f2fs_put_page(page, err ? 1 : 0);

		if (!err)
			f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}

void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	bool readahead = false;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
}

static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;

	f2fs_do_write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* collect a number of dirty meta pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_META) <
					nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if trylock failed, cp will flush dirty pages instead */
	if (!down_write_trylock(&sbi->cp_global_sem))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
	up_write(&sbi->cp_global_sem);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

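/*
 * Write back up to @nr_to_write dirty meta pages. When @nr_to_write is
 * bounded, writeback stops at the first non-contiguous page index so that
 * checkpoint pack pages are submitted sequentially.
 */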
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	int nr_pages;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec);

	blk_start_plug(&plug);

	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, type);

	blk_finish_plug(&plug);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		set_page_private_reference(page);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

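/*
 * Track @ino in the per-type radix tree and list. The entry is allocated
 * outside ino_lock; for FLUSH_INO the @devidx bit is also set to record
 * which device holds dirty data for this inode.
 */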
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}

void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}

bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool is_dirty = false;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
		is_dirty = true;
	spin_unlock(&im->ino_lock);
	return is_dirty;
}

int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(sbi, FAULT_ORPHAN);
		return -ENOSPC;
	}

	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void f2fs_add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	f2fs_update_inode_page(inode);
}

void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

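/*
 * Reclaim one orphan inode recorded in the checkpoint: look the inode up,
 * clear its link count, and let the final iput() truncate and free it.
 */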
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * there should be a bug that we can't find the entry
		 * for the orphan inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	err = dquot_initialize(inode);
	if (err) {
		iput(inode);
		goto err_out;
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	err = f2fs_get_node_info(sbi, ino, &ni);
	if (err)
		goto err_out;

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		err = -EIO;
		goto err_out;
	}
	return 0;

err_out:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
		  __func__, ino);
	return err;
}

int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (bdev_read_only(sbi->sb->s_bdev)) {
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		return 0;
	}

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page;
		struct f2fs_orphan_block *orphan_blk;

		page = f2fs_get_meta_page(sbi, start_blk + i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out;
		}

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write it into the journal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = f2fs_grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * when an orphan block is full of 1020 entries,
			 * we need to flush the current orphan block
			 * and bring another one in memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

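/*
 * Checksum the checkpoint block. The bytes of the checksum field itself
 * are skipped when checksum_offset points into the middle of the block.
 */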
static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
						struct f2fs_checkpoint *ckpt)
{
	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
	__u32 chksum;

	chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
						F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}

static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	size_t crc_offset = 0;
	__u32 crc;

	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
	if (IS_ERR(*cp_page))
		return PTR_ERR(*cp_page);

	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
			crc_offset > CP_CHKSUM_OFFSET) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
	if (crc != cur_cp_crc(*cp_block)) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}

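/*
 * A checkpoint pack is valid only when the version numbers stored in its
 * first and last blocks match; on success the first block's page is returned.
 */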
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		return NULL;

	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
					sbi->blocks_per_seg) {
		f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
			  le32_to_cpu(cp_block->cp_pack_total_block_count));
		goto invalid_cp;
	}
	pre_version = *version;

	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
	f2fs_put_page(cp_page_2, 1);
invalid_cp:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;
	int err;

	sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
				  GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		err = -EFSCORRUPTED;
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	/* Sanity checking of checkpoint */
	if (f2fs_sanity_check_ckpt(sbi)) {
		err = -EFSCORRUPTED;
		goto free_fail_no_cp;
	}

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
		if (IS_ERR(cur_page)) {
			err = PTR_ERR(cur_page);
			goto free_fail_no_cp;
		}
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kvfree(sbi->ckpt);
	return err;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	if (!f2fs_is_volatile_file(inode))
		list_add_tail(&F2FS_I(inode)->dirty_list,
						&sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void f2fs_update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	set_page_private_reference(page);
}

void f2fs_remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi))) {
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return -EIO;
	}

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to yield the CPU to other writers. */
		if (ino == cur_ino)
			cond_resched();
		else
			ino = cur_ino;
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * in the freeing inode may still be under writeback.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}

int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_first_entry(head, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			sync_inode_metadata(inode, 0);

			/* it's on eviction */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
				f2fs_update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

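/*
 * Snapshot the valid block, node and inode counts plus the next free nid
 * into the checkpoint block.
 */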
static void __prepare_cp_block(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;

	next_free_nid(sbi, &last_nid);
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);
}

static bool __need_flush_quota(struct f2fs_sb_info *sbi)
{
	bool ret = false;

	if (!is_journalled_quota(sbi))
		return false;

	down_write(&sbi->quota_sem);
	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
		clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
		ret = true;
	} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
		ret = true;
	}
	up_write(&sbi->quota_sem);
	return ret;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	int err = 0, cnt = 0;

	/*
	 * Let's flush inline_data in dirty node pages.
	 */
	f2fs_flush_inline_data(sbi);

retry_flush_quotas:
	f2fs_lock_all(sbi);
	if (__need_flush_quota(sbi)) {
		int locked;

		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
			set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
			goto retry_flush_dents;
		}
		f2fs_unlock_all(sbi);

		/* only failed during mount/umount/freeze/quotactl */
		locked = down_read_trylock(&sbi->sb->s_umount);
		f2fs_quota_sync(sbi->sb, -1);
		if (locked)
			up_read(&sbi->sb->s_umount);
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_dents:
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			return err;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
	DEFINE_WAIT(wait);

	for (;;) {
		if (!get_pages(sbi, type))
			break;

		if (unlikely(f2fs_cp_error(sbi)))
			break;

		if (type == F2FS_DIRTY_META)
			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
							FS_CP_META_IO);
		else if (type == F2FS_WB_CP_DATA)
			f2fs_submit_merged_write(sbi, DATA);

		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

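/*
 * Translate the in-memory sbi state and the cp_control reason into
 * on-disk checkpoint flags, under cp_lock.
 */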
static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);

	if ((cpc->reason & CP_UMOUNT) &&
			le32_to_cpu(ckpt->cp_pack_total_block_count) >
			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
		disable_nat_bits(sbi, false);

	if (cpc->reason & CP_TRIMMED)
		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);

	if (cpc->reason & CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason & CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
		__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
	__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);

	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static void commit_checkpoint(struct f2fs_sb_info *sbi,
	void *src, block_t blk_addr)
{
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	/*
	 * pagevec_lookup_tag and lock_page again will take
	 * some extra time. Therefore, f2fs_update_meta_pages and
	 * f2fs_sync_meta_pages are combined in this function.
	 */
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	int err;

	f2fs_wait_on_page_writeback(page, META, true, true);

	memcpy(page_address(page), src, PAGE_SIZE);

	set_page_dirty(page);
	if (unlikely(!clear_page_dirty_for_io(page)))
		f2fs_bug_on(sbi, 1);

	/* writeout cp pack 2 page */
	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
	if (unlikely(err && f2fs_cp_error(sbi))) {
		f2fs_put_page(page, 1);
		return;
	}

	f2fs_bug_on(sbi, err);
	f2fs_put_page(page, 0);

	/* submit checkpoint (with barrier if NOBARRIER is not set) */
	f2fs_submit_merged_write(sbi, META_FLUSH);
}

static inline u64 get_sectors_written(struct block_device *bdev)
{
	return (u64)part_stat_read(bdev, sectors[STAT_WRITE]);
}

u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
{
	if (f2fs_is_multi_device(sbi)) {
		u64 sectors = 0;
		int i;

		for (i = 0; i < sbi->s_ndevs; i++)
			sectors += get_sectors_written(FDEV(i).bdev);

		return sectors;
	}

	return get_sectors_written(sbi->sb->s_bdev);
}

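/*
 * Write out the whole checkpoint pack: the cp block and its payload, NAT
 * bits, orphan blocks and data/node summaries, then commit the cp pack 2
 * page with a flush barrier.
 */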
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;
	int err;

	/* Flush all the NAT/SIT pages */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);

	/* start to update checkpoint, cp ver is already updated previously */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
	spin_lock_irqsave(&sbi->cp_lock, flags);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	/* update ckpt flag for checkpoint */
	update_ckpt_flags(sbi, cpc);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_next_addr(sbi);

	/* write nat bits */
	if (enabled_nat_bits(sbi, cpc)) {
		__u64 cp_ver = cur_cp_version(ckpt);
		block_t blk;

		cp_ver |= ((__u64)crc32 << 32);
		*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);

		blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
		for (i = 0; i < nm_i->nat_bits_blocks; i++)
			f2fs_update_meta_page(sbi, nm_i->nat_bits +
					(i << F2FS_BLKSIZE_BITS), blk + i);
	}

	/* write out checkpoint buffer at block 0 */
	f2fs_update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	f2fs_write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	kbytes_written += (f2fs_get_sectors_written(sbi) -
				sbi->sectors_written_start) >> 1;
	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		f2fs_write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we have one bio holding the CP pack except the cp pack 2 page */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
	/* Wait for all dirty meta pages to be submitted for IO */
	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);

	/* wait for previous submitted meta pages writeback */
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* flush all device cache */
	err = f2fs_flush_device_cache(sbi);
	if (err)
		return err;

	/* barrier and flush checkpoint cp pack 2 page if it can */
	commit_checkpoint(sbi, ckpt, start_blk);
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/*
	 * invalidate intermediate page cache borrowed from the meta inode
	 * which is used for migration of encrypted, verity or compressed
	 * inode blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
		f2fs_sb_has_compression(sbi))
		invalidate_mapping_pages(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);

	f2fs_release_ino_entry(sbi, false);

	f2fs_reset_fsync_node_info(sbi);

	clear_sbi_flag(sbi, SBI_IS_DIRTY);
	clear_sbi_flag(sbi, SBI_NEED_CP);
	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
	spin_unlock(&sbi->stat_lock);

	__set_cp_next_pack(sbi);

	/*
	 * redirty superblock if metadata like node page or inode cache is
	 * updated during writing checkpoint.
	 */
	if (get_pages(sbi, F2FS_DIRTY_NODES) ||
			get_pages(sbi, F2FS_DIRTY_IMETA))
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));

	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}

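/*
 * Top-level checkpoint entry: freeze FS operations, flush cached NAT/SIT
 * entries, write the checkpoint pack, and unfreeze.
 */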
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (cpc->reason != CP_PAUSE)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
	if (cpc->reason != CP_RESIZE)
		down_write(&sbi->cp_global_sem);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_writes(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason & CP_DISCARD) {
		if (!f2fs_exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			f2fs_flush_sit_entries(sbi, cpc);
			f2fs_clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written in the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	err = f2fs_flush_nat_entries(sbi, cpc);
	if (err)
		goto stop;

	f2fs_flush_sit_entries(sbi, cpc);

	/* save inmem log status */
	f2fs_save_inmem_curseg(sbi);

	err = do_checkpoint(sbi, cpc);
	if (err)
		f2fs_release_discard_addrs(sbi);
	else
		f2fs_clear_prefree_segments(sbi, cpc);

	f2fs_restore_inmem_curseg(sbi);
stop:
	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

	/* update CP_TIME to trigger checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	if (cpc->reason != CP_RESIZE)
		up_write(&sbi->cp_global_sem);
	return err;
}

void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init f2fs_create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!f2fs_inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(f2fs_inode_entry_slab);
}

static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_SYNC, };
	int err;

	down_write(&sbi->gc_lock);
	err = f2fs_write_checkpoint(sbi, &cpc);
	up_write(&sbi->gc_lock);

	return err;
}

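/*
 * Drain the issue_list: run a single checkpoint on behalf of every queued
 * request, complete all waiters with the shared result, and update the
 * average/peak wait-time statistics.
 */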
static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req *req, *next;
	struct llist_node *dispatch_list;
	u64 sum_diff = 0, diff, count = 0;
	int ret;

	dispatch_list = llist_del_all(&cprc->issue_list);
	if (!dispatch_list)
		return;
	dispatch_list = llist_reverse_order(dispatch_list);

	ret = __write_checkpoint_sync(sbi);
	atomic_inc(&cprc->issued_ckpt);

	llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
		diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
		req->ret = ret;
		complete(&req->wait);

		sum_diff += diff;
		count++;
	}
	atomic_sub(count, &cprc->queued_ckpt);
	atomic_add(count, &cprc->total_ckpt);

	spin_lock(&cprc->stat_lock);
	cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
	if (cprc->peak_time < cprc->cur_time)
		cprc->peak_time = cprc->cur_time;
	spin_unlock(&cprc->stat_lock);
}

static int issue_checkpoint_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	wait_queue_head_t *q = &cprc->ckpt_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&cprc->issue_list))
		__checkpoint_and_complete_reqs(sbi);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&cprc->issue_list));
	goto repeat;
}

static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
		struct ckpt_req *wait_req)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (!llist_empty(&cprc->issue_list)) {
		__checkpoint_and_complete_reqs(sbi);
	} else {
		/* already dispatched by issue_checkpoint_thread */
		if (wait_req)
			wait_for_completion(&wait_req->wait);
	}
}

static void init_ckpt_req(struct ckpt_req *req)
{
	memset(req, 0, sizeof(struct ckpt_req));

	init_completion(&req->wait);
	req->queue_time = ktime_get();
}

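/*
 * Issue a checkpoint: when checkpoint merging is enabled and the reason is
 * CP_SYNC, queue the request on the checkpoint thread and wait for it;
 * otherwise run the checkpoint synchronously in the caller's context.
 */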
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req req;
	struct cp_control cpc;

	cpc.reason = __get_cp_reason(sbi);
	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
		int ret;

		down_write(&sbi->gc_lock);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		up_write(&sbi->gc_lock);

		return ret;
	}

	if (!cprc->f2fs_issue_ckpt)
		return __write_checkpoint_sync(sbi);

	init_ckpt_req(&req);

	llist_add(&req.llnode, &cprc->issue_list);
	atomic_inc(&cprc->queued_ckpt);

	/*
	 * update issue_list before we wake up issue_checkpoint thread,
	 * this smp_mb() pairs with another barrier in ___wait_event(),
	 * see more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&cprc->ckpt_wait_queue))
		wake_up(&cprc->ckpt_wait_queue);

	if (cprc->f2fs_issue_ckpt)
		wait_for_completion(&req.wait);
	else
		flush_remained_ckpt_reqs(sbi, &req);

	return req.ret;
}

int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt)
		return 0;

	cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
			"f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(cprc->f2fs_issue_ckpt)) {
		cprc->f2fs_issue_ckpt = NULL;
		return -ENOMEM;
	}

	set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);

	return 0;
}

void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt) {
		struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;

		cprc->f2fs_issue_ckpt = NULL;
		kthread_stop(ckpt_task);

		flush_remained_ckpt_reqs(sbi, NULL);
	}
}

void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	atomic_set(&cprc->issued_ckpt, 0);
	atomic_set(&cprc->total_ckpt, 0);
	atomic_set(&cprc->queued_ckpt, 0);
	cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO;
	init_waitqueue_head(&cprc->ckpt_wait_queue);
	init_llist_head(&cprc->issue_list);
	spin_lock_init(&cprc->stat_lock);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/f2fs/checkpoint.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/fs.h>
9#include <linux/bio.h>
10#include <linux/mpage.h>
11#include <linux/writeback.h>
12#include <linux/blkdev.h>
13#include <linux/f2fs_fs.h>
14#include <linux/pagevec.h>
15#include <linux/swap.h>
16
17#include "f2fs.h"
18#include "node.h"
19#include "segment.h"
20#include "trace.h"
21#include <trace/events/f2fs.h>
22
23static struct kmem_cache *ino_entry_slab;
24struct kmem_cache *f2fs_inode_entry_slab;
25
26void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
27{
28 f2fs_build_fault_attr(sbi, 0, 0);
29 set_ckpt_flags(sbi, CP_ERROR_FLAG);
30 if (!end_io)
31 f2fs_flush_merged_writes(sbi);
32}
33
34/*
35 * We guarantee no failure on the returned page.
36 */
37struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
38{
39 struct address_space *mapping = META_MAPPING(sbi);
40 struct page *page = NULL;
41repeat:
42 page = f2fs_grab_cache_page(mapping, index, false);
43 if (!page) {
44 cond_resched();
45 goto repeat;
46 }
47 f2fs_wait_on_page_writeback(page, META, true, true);
48 if (!PageUptodate(page))
49 SetPageUptodate(page);
50 return page;
51}
52
53/*
54 * We guarantee no failure on the returned page.
55 */
56static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
57 bool is_meta)
58{
59 struct address_space *mapping = META_MAPPING(sbi);
60 struct page *page;
61 struct f2fs_io_info fio = {
62 .sbi = sbi,
63 .type = META,
64 .op = REQ_OP_READ,
65 .op_flags = REQ_META | REQ_PRIO,
66 .old_blkaddr = index,
67 .new_blkaddr = index,
68 .encrypted_page = NULL,
69 .is_por = !is_meta,
70 };
71 int err;
72
73 if (unlikely(!is_meta))
74 fio.op_flags &= ~REQ_META;
75repeat:
76 page = f2fs_grab_cache_page(mapping, index, false);
77 if (!page) {
78 cond_resched();
79 goto repeat;
80 }
81 if (PageUptodate(page))
82 goto out;
83
84 fio.page = page;
85
86 err = f2fs_submit_page_bio(&fio);
87 if (err) {
88 f2fs_put_page(page, 1);
89 return ERR_PTR(err);
90 }
91
92 lock_page(page);
93 if (unlikely(page->mapping != mapping)) {
94 f2fs_put_page(page, 1);
95 goto repeat;
96 }
97
98 if (unlikely(!PageUptodate(page))) {
99 f2fs_put_page(page, 1);
100 return ERR_PTR(-EIO);
101 }
102out:
103 return page;
104}
105
106struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
107{
108 return __get_meta_page(sbi, index, true);
109}
110
111struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
112{
113 struct page *page;
114 int count = 0;
115
116retry:
117 page = __get_meta_page(sbi, index, true);
118 if (IS_ERR(page)) {
119 if (PTR_ERR(page) == -EIO &&
120 ++count <= DEFAULT_RETRY_IO_COUNT)
121 goto retry;
122 f2fs_stop_checkpoint(sbi, false);
123 }
124 return page;
125}
126
127/* for POR only */
128struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
129{
130 return __get_meta_page(sbi, index, false);
131}
132
133static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
134 int type)
135{
136 struct seg_entry *se;
137 unsigned int segno, offset;
138 bool exist;
139
140 if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
141 return true;
142
143 segno = GET_SEGNO(sbi, blkaddr);
144 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
145 se = get_seg_entry(sbi, segno);
146
147 exist = f2fs_test_bit(offset, se->cur_valid_map);
148 if (!exist && type == DATA_GENERIC_ENHANCE) {
149 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
150 blkaddr, exist);
151 set_sbi_flag(sbi, SBI_NEED_FSCK);
152 WARN_ON(1);
153 }
154 return exist;
155}
156
157bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
158 block_t blkaddr, int type)
159{
160 switch (type) {
161 case META_NAT:
162 break;
163 case META_SIT:
164 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
165 return false;
166 break;
167 case META_SSA:
168 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
169 blkaddr < SM_I(sbi)->ssa_blkaddr))
170 return false;
171 break;
172 case META_CP:
173 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
174 blkaddr < __start_cp_addr(sbi)))
175 return false;
176 break;
177 case META_POR:
178 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
179 blkaddr < MAIN_BLKADDR(sbi)))
180 return false;
181 break;
182 case DATA_GENERIC:
183 case DATA_GENERIC_ENHANCE:
184 case DATA_GENERIC_ENHANCE_READ:
185 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
186 blkaddr < MAIN_BLKADDR(sbi))) {
187 f2fs_warn(sbi, "access invalid blkaddr:%u",
188 blkaddr);
189 set_sbi_flag(sbi, SBI_NEED_FSCK);
190 WARN_ON(1);
191 return false;
192 } else {
193 return __is_bitmap_valid(sbi, blkaddr, type);
194 }
195 break;
196 case META_GENERIC:
197 if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
198 blkaddr >= MAIN_BLKADDR(sbi)))
199 return false;
200 break;
201 default:
202 BUG();
203 }
204
205 return true;
206}
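
/*
 * The range checks above mirror the on-disk layout (illustrative; the
 * exact boundaries come from the superblock):
 *
 *	SEG0_BLKADDR .. MAIN_BLKADDR - 1 : meta area (CP/SIT/NAT/SSA)
 *	MAIN_BLKADDR .. MAX_BLKADDR - 1  : main area (node/data segments)
 *
 * e.g. a DATA_GENERIC address must land in the main area, and the
 * ENHANCE variants additionally require the SIT bitmap to show the
 * block as valid via __is_bitmap_valid().
 */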
207
208/*
209 * Readahead CP/NAT/SIT/SSA pages
210 */
211int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
212 int type, bool sync)
213{
214 struct page *page;
215 block_t blkno = start;
216 struct f2fs_io_info fio = {
217 .sbi = sbi,
218 .type = META,
219 .op = REQ_OP_READ,
220 .op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
221 .encrypted_page = NULL,
222 .in_list = false,
223 .is_por = (type == META_POR),
224 };
225 struct blk_plug plug;
226
227 if (unlikely(type == META_POR))
228 fio.op_flags &= ~REQ_META;
229
230 blk_start_plug(&plug);
231 for (; nrpages-- > 0; blkno++) {
232
233 if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
234 goto out;
235
236 switch (type) {
237 case META_NAT:
238 if (unlikely(blkno >=
239 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
240 blkno = 0;
241 /* get nat block addr */
242 fio.new_blkaddr = current_nat_addr(sbi,
243 blkno * NAT_ENTRY_PER_BLOCK);
244 break;
245 case META_SIT:
246 /* get sit block addr */
247 fio.new_blkaddr = current_sit_addr(sbi,
248 blkno * SIT_ENTRY_PER_BLOCK);
249 break;
250 case META_SSA:
251 case META_CP:
252 case META_POR:
253 fio.new_blkaddr = blkno;
254 break;
255 default:
256 BUG();
257 }
258
259 page = f2fs_grab_cache_page(META_MAPPING(sbi),
260 fio.new_blkaddr, false);
261 if (!page)
262 continue;
263 if (PageUptodate(page)) {
264 f2fs_put_page(page, 1);
265 continue;
266 }
267
268 fio.page = page;
269 f2fs_submit_page_bio(&fio);
270 f2fs_put_page(page, 0);
271 }
272out:
273 blk_finish_plug(&plug);
274 return blkno - start;
275}
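
/*
 * Usage sketch (illustrative): mount and recovery paths warm the meta
 * cache before walking on-disk tables, along the lines of:
 *
 *	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
 *					META_NAT, true);
 *
 * The return value is the number of block addresses consumed, so a
 * sequential scanner can advance its cursor by it.
 */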
276
277void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
278{
279 struct page *page;
280 bool readahead = false;
281
282 page = find_get_page(META_MAPPING(sbi), index);
283 if (!page || !PageUptodate(page))
284 readahead = true;
285 f2fs_put_page(page, 0);
286
287 if (readahead)
288 f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
289}
290
291static int __f2fs_write_meta_page(struct page *page,
292 struct writeback_control *wbc,
293 enum iostat_type io_type)
294{
295 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
296
297 trace_f2fs_writepage(page, META);
298
299 if (unlikely(f2fs_cp_error(sbi)))
300 goto redirty_out;
301 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
302 goto redirty_out;
303 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
304 goto redirty_out;
305
306 f2fs_do_write_meta_page(sbi, page, io_type);
307 dec_page_count(sbi, F2FS_DIRTY_META);
308
309 if (wbc->for_reclaim)
310 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
311
312 unlock_page(page);
313
314 if (unlikely(f2fs_cp_error(sbi)))
315 f2fs_submit_merged_write(sbi, META);
316
317 return 0;
318
319redirty_out:
320 redirty_page_for_writepage(wbc, page);
321 return AOP_WRITEPAGE_ACTIVATE;
322}
323
324static int f2fs_write_meta_page(struct page *page,
325 struct writeback_control *wbc)
326{
327 return __f2fs_write_meta_page(page, wbc, FS_META_IO);
328}
329
330static int f2fs_write_meta_pages(struct address_space *mapping,
331 struct writeback_control *wbc)
332{
333 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
334 long diff, written;
335
336 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
337 goto skip_write;
338
339 /* collect a number of dirty meta pages and write together */
340 if (wbc->sync_mode != WB_SYNC_ALL &&
341 get_pages(sbi, F2FS_DIRTY_META) <
342 nr_pages_to_skip(sbi, META))
343 goto skip_write;
344
345	/* if trylock failed, checkpoint will flush dirty pages instead */
346 if (!mutex_trylock(&sbi->cp_mutex))
347 goto skip_write;
348
349 trace_f2fs_writepages(mapping->host, wbc, META);
350 diff = nr_pages_to_write(sbi, META, wbc);
351 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
352 mutex_unlock(&sbi->cp_mutex);
353 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
354 return 0;
355
356skip_write:
357 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
358 trace_f2fs_writepages(mapping->host, wbc, META);
359 return 0;
360}
361
362long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
363 long nr_to_write, enum iostat_type io_type)
364{
365 struct address_space *mapping = META_MAPPING(sbi);
366 pgoff_t index = 0, prev = ULONG_MAX;
367 struct pagevec pvec;
368 long nwritten = 0;
369 int nr_pages;
370 struct writeback_control wbc = {
371 .for_reclaim = 0,
372 };
373 struct blk_plug plug;
374
375 pagevec_init(&pvec);
376
377 blk_start_plug(&plug);
378
379 while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
380 PAGECACHE_TAG_DIRTY))) {
381 int i;
382
383 for (i = 0; i < nr_pages; i++) {
384 struct page *page = pvec.pages[i];
385
386 if (prev == ULONG_MAX)
387 prev = page->index - 1;
388 if (nr_to_write != LONG_MAX && page->index != prev + 1) {
389 pagevec_release(&pvec);
390 goto stop;
391 }
392
393 lock_page(page);
394
395 if (unlikely(page->mapping != mapping)) {
396continue_unlock:
397 unlock_page(page);
398 continue;
399 }
400 if (!PageDirty(page)) {
401 /* someone wrote it for us */
402 goto continue_unlock;
403 }
404
405 f2fs_wait_on_page_writeback(page, META, true, true);
406
407 if (!clear_page_dirty_for_io(page))
408 goto continue_unlock;
409
410 if (__f2fs_write_meta_page(page, &wbc, io_type)) {
411 unlock_page(page);
412 break;
413 }
414 nwritten++;
415 prev = page->index;
416 if (unlikely(nwritten >= nr_to_write))
417 break;
418 }
419 pagevec_release(&pvec);
420 cond_resched();
421 }
422stop:
423 if (nwritten)
424 f2fs_submit_merged_write(sbi, type);
425
426 blk_finish_plug(&plug);
427
428 return nwritten;
429}
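
/*
 * Note (illustrative): with a bounded nr_to_write the loop above stops
 * at the first non-contiguous dirty index (page->index != prev + 1),
 * so a partial flush only writes one contiguous run of meta pages
 * rather than scattering writes across the meta area.
 */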
430
431static int f2fs_set_meta_page_dirty(struct page *page)
432{
433 trace_f2fs_set_page_dirty(page, META);
434
435 if (!PageUptodate(page))
436 SetPageUptodate(page);
437 if (!PageDirty(page)) {
438 __set_page_dirty_nobuffers(page);
439 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
440 f2fs_set_page_private(page, 0);
441 f2fs_trace_pid(page);
442 return 1;
443 }
444 return 0;
445}
446
447const struct address_space_operations f2fs_meta_aops = {
448 .writepage = f2fs_write_meta_page,
449 .writepages = f2fs_write_meta_pages,
450 .set_page_dirty = f2fs_set_meta_page_dirty,
451 .invalidatepage = f2fs_invalidate_page,
452 .releasepage = f2fs_release_page,
453#ifdef CONFIG_MIGRATION
454 .migratepage = f2fs_migrate_page,
455#endif
456};
457
458static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
459 unsigned int devidx, int type)
460{
461 struct inode_management *im = &sbi->im[type];
462 struct ino_entry *e, *tmp;
463
464 tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
465
466 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
467
468 spin_lock(&im->ino_lock);
469 e = radix_tree_lookup(&im->ino_root, ino);
470 if (!e) {
471 e = tmp;
472 if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
473 f2fs_bug_on(sbi, 1);
474
475 memset(e, 0, sizeof(struct ino_entry));
476 e->ino = ino;
477
478 list_add_tail(&e->list, &im->ino_list);
479 if (type != ORPHAN_INO)
480 im->ino_num++;
481 }
482
483 if (type == FLUSH_INO)
484 f2fs_set_bit(devidx, (char *)&e->dirty_device);
485
486 spin_unlock(&im->ino_lock);
487 radix_tree_preload_end();
488
489 if (e != tmp)
490 kmem_cache_free(ino_entry_slab, tmp);
491}
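
/*
 * Note on the pattern above (illustrative): both the slab entry and the
 * radix-tree nodes are preallocated with __GFP_NOFAIL before ino_lock
 * is taken, so the insertion under the spinlock can neither sleep nor
 * fail:
 *
 *	tmp = f2fs_kmem_cache_alloc(...);    (may sleep, never fails)
 *	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
 *	spin_lock(&im->ino_lock);
 *	... radix_tree_insert() draws from the preloaded pool ...
 *	spin_unlock(&im->ino_lock);
 *	radix_tree_preload_end();
 *
 * If the ino was already present, the spare entry is freed at the end.
 */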
492
493static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
494{
495 struct inode_management *im = &sbi->im[type];
496 struct ino_entry *e;
497
498 spin_lock(&im->ino_lock);
499 e = radix_tree_lookup(&im->ino_root, ino);
500 if (e) {
501 list_del(&e->list);
502 radix_tree_delete(&im->ino_root, ino);
503 im->ino_num--;
504 spin_unlock(&im->ino_lock);
505 kmem_cache_free(ino_entry_slab, e);
506 return;
507 }
508 spin_unlock(&im->ino_lock);
509}
510
511void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
512{
513 /* add new dirty ino entry into list */
514 __add_ino_entry(sbi, ino, 0, type);
515}
516
517void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
518{
519 /* remove dirty ino entry from list */
520 __remove_ino_entry(sbi, ino, type);
521}
522
523/* mode should be APPEND_INO or UPDATE_INO */
524bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
525{
526 struct inode_management *im = &sbi->im[mode];
527 struct ino_entry *e;
528
529 spin_lock(&im->ino_lock);
530 e = radix_tree_lookup(&im->ino_root, ino);
531 spin_unlock(&im->ino_lock);
532 return e ? true : false;
533}
534
535void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
536{
537 struct ino_entry *e, *tmp;
538 int i;
539
540 for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
541 struct inode_management *im = &sbi->im[i];
542
543 spin_lock(&im->ino_lock);
544 list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
545 list_del(&e->list);
546 radix_tree_delete(&im->ino_root, e->ino);
547 kmem_cache_free(ino_entry_slab, e);
548 im->ino_num--;
549 }
550 spin_unlock(&im->ino_lock);
551 }
552}
553
554void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
555 unsigned int devidx, int type)
556{
557 __add_ino_entry(sbi, ino, devidx, type);
558}
559
560bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
561 unsigned int devidx, int type)
562{
563 struct inode_management *im = &sbi->im[type];
564 struct ino_entry *e;
565 bool is_dirty = false;
566
567 spin_lock(&im->ino_lock);
568 e = radix_tree_lookup(&im->ino_root, ino);
569 if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
570 is_dirty = true;
571 spin_unlock(&im->ino_lock);
572 return is_dirty;
573}
574
575int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
576{
577 struct inode_management *im = &sbi->im[ORPHAN_INO];
578 int err = 0;
579
580 spin_lock(&im->ino_lock);
581
582 if (time_to_inject(sbi, FAULT_ORPHAN)) {
583 spin_unlock(&im->ino_lock);
584 f2fs_show_injection_info(FAULT_ORPHAN);
585 return -ENOSPC;
586 }
587
588 if (unlikely(im->ino_num >= sbi->max_orphans))
589 err = -ENOSPC;
590 else
591 im->ino_num++;
592 spin_unlock(&im->ino_lock);
593
594 return err;
595}
596
597void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
598{
599 struct inode_management *im = &sbi->im[ORPHAN_INO];
600
601 spin_lock(&im->ino_lock);
602 f2fs_bug_on(sbi, im->ino_num == 0);
603 im->ino_num--;
604 spin_unlock(&im->ino_lock);
605}
606
607void f2fs_add_orphan_inode(struct inode *inode)
608{
609 /* add new orphan ino entry into list */
610 __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
611 f2fs_update_inode_page(inode);
612}
613
614void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
615{
616 /* remove orphan entry from orphan list */
617 __remove_ino_entry(sbi, ino, ORPHAN_INO);
618}
619
620static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
621{
622 struct inode *inode;
623 struct node_info ni;
624 int err;
625
626 inode = f2fs_iget_retry(sbi->sb, ino);
627 if (IS_ERR(inode)) {
628 /*
629		 * it is a bug if we can't find the entry
630		 * for the orphan inode.
631 */
632 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
633 return PTR_ERR(inode);
634 }
635
636 err = dquot_initialize(inode);
637 if (err) {
638 iput(inode);
639 goto err_out;
640 }
641
642 clear_nlink(inode);
643
644 /* truncate all the data during iput */
645 iput(inode);
646
647 err = f2fs_get_node_info(sbi, ino, &ni);
648 if (err)
649 goto err_out;
650
651 /* ENOMEM was fully retried in f2fs_evict_inode. */
652 if (ni.blk_addr != NULL_ADDR) {
653 err = -EIO;
654 goto err_out;
655 }
656 return 0;
657
658err_out:
659 set_sbi_flag(sbi, SBI_NEED_FSCK);
660 f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
661 __func__, ino);
662 return err;
663}
664
665int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
666{
667 block_t start_blk, orphan_blocks, i, j;
668 unsigned int s_flags = sbi->sb->s_flags;
669 int err = 0;
670#ifdef CONFIG_QUOTA
671 int quota_enabled;
672#endif
673
674 if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
675 return 0;
676
677 if (bdev_read_only(sbi->sb->s_bdev)) {
678 f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
679 return 0;
680 }
681
682 if (s_flags & SB_RDONLY) {
683 f2fs_info(sbi, "orphan cleanup on readonly fs");
684 sbi->sb->s_flags &= ~SB_RDONLY;
685 }
686
687#ifdef CONFIG_QUOTA
688 /* Needed for iput() to work correctly and not trash data */
689 sbi->sb->s_flags |= SB_ACTIVE;
690
691 /*
692 * Turn on quotas which were not enabled for read-only mounts if
693 * filesystem has quota feature, so that they are updated correctly.
694 */
695 quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
696#endif
697
698 start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
699 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
700
701 f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
702
703 for (i = 0; i < orphan_blocks; i++) {
704 struct page *page;
705 struct f2fs_orphan_block *orphan_blk;
706
707 page = f2fs_get_meta_page(sbi, start_blk + i);
708 if (IS_ERR(page)) {
709 err = PTR_ERR(page);
710 goto out;
711 }
712
713 orphan_blk = (struct f2fs_orphan_block *)page_address(page);
714 for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
715 nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
716 err = recover_orphan_inode(sbi, ino);
717 if (err) {
718 f2fs_put_page(page, 1);
719 goto out;
720 }
721 }
722 f2fs_put_page(page, 1);
723 }
724 /* clear Orphan Flag */
725 clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
726out:
727 set_sbi_flag(sbi, SBI_IS_RECOVERED);
728
729#ifdef CONFIG_QUOTA
730 /* Turn quotas off */
731 if (quota_enabled)
732 f2fs_quota_off_umount(sbi->sb);
733#endif
734 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
735
736 return err;
737}
738
739static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
740{
741 struct list_head *head;
742 struct f2fs_orphan_block *orphan_blk = NULL;
743 unsigned int nentries = 0;
744 unsigned short index = 1;
745 unsigned short orphan_blocks;
746 struct page *page = NULL;
747 struct ino_entry *orphan = NULL;
748 struct inode_management *im = &sbi->im[ORPHAN_INO];
749
750 orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
751
752 /*
753 * we don't need to do spin_lock(&im->ino_lock) here, since all the
754 * orphan inode operations are covered under f2fs_lock_op().
755 * And, spin_lock should be avoided due to page operations below.
756 */
757 head = &im->ino_list;
758
759	/* loop for each orphan inode entry and write them in journal block */
760 list_for_each_entry(orphan, head, list) {
761 if (!page) {
762 page = f2fs_grab_meta_page(sbi, start_blk++);
763 orphan_blk =
764 (struct f2fs_orphan_block *)page_address(page);
765 memset(orphan_blk, 0, sizeof(*orphan_blk));
766 }
767
768 orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
769
770 if (nentries == F2FS_ORPHANS_PER_BLOCK) {
771 /*
772			 * once an orphan block is full of 1020 entries,
773			 * we need to flush the current orphan block
774			 * and bring another one in memory
775 */
776 orphan_blk->blk_addr = cpu_to_le16(index);
777 orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
778 orphan_blk->entry_count = cpu_to_le32(nentries);
779 set_page_dirty(page);
780 f2fs_put_page(page, 1);
781 index++;
782 nentries = 0;
783 page = NULL;
784 }
785 }
786
787 if (page) {
788 orphan_blk->blk_addr = cpu_to_le16(index);
789 orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
790 orphan_blk->entry_count = cpu_to_le32(nentries);
791 set_page_dirty(page);
792 f2fs_put_page(page, 1);
793 }
794}
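
/*
 * Worked example (illustrative): with F2FS_ORPHANS_PER_BLOCK == 1020,
 * 2500 orphans need GET_ORPHAN_BLOCKS(2500) == 3 blocks holding 1020,
 * 1020 and 460 entries; each block records blk_addr (its 1-based index)
 * and blk_count (3), so a later reader can tell where a block sits in
 * the set.
 */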
795
796static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
797 struct f2fs_checkpoint *ckpt)
798{
799 unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
800 __u32 chksum;
801
802 chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
803 if (chksum_ofs < CP_CHKSUM_OFFSET) {
804 chksum_ofs += sizeof(chksum);
805 chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
806 F2FS_BLKSIZE - chksum_ofs);
807 }
808 return chksum;
809}
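
/*
 * Layout behind the two-step checksum above (illustrative): with a
 * legacy checksum_offset (< CP_CHKSUM_OFFSET) the crc sits in the
 * middle of the block, so the block is covered in two pieces that skip
 * the 4-byte checksum field itself:
 *
 *	[0 .. chksum_ofs) | __le32 crc | [chksum_ofs + 4 .. F2FS_BLKSIZE)
 *
 * With the current layout (checksum_offset == CP_CHKSUM_OFFSET) the
 * first range alone covers everything before the trailing checksum.
 */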
810
811static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
812 struct f2fs_checkpoint **cp_block, struct page **cp_page,
813 unsigned long long *version)
814{
815 size_t crc_offset = 0;
816 __u32 crc;
817
818 *cp_page = f2fs_get_meta_page(sbi, cp_addr);
819 if (IS_ERR(*cp_page))
820 return PTR_ERR(*cp_page);
821
822 *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
823
824 crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
825 if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
826 crc_offset > CP_CHKSUM_OFFSET) {
827 f2fs_put_page(*cp_page, 1);
828 f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
829 return -EINVAL;
830 }
831
832 crc = f2fs_checkpoint_chksum(sbi, *cp_block);
833 if (crc != cur_cp_crc(*cp_block)) {
834 f2fs_put_page(*cp_page, 1);
835 f2fs_warn(sbi, "invalid crc value");
836 return -EINVAL;
837 }
838
839 *version = cur_cp_version(*cp_block);
840 return 0;
841}
842
843static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
844 block_t cp_addr, unsigned long long *version)
845{
846 struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
847 struct f2fs_checkpoint *cp_block = NULL;
848 unsigned long long cur_version = 0, pre_version = 0;
849 int err;
850
851 err = get_checkpoint_version(sbi, cp_addr, &cp_block,
852 &cp_page_1, version);
853 if (err)
854 return NULL;
855
856 if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
857 sbi->blocks_per_seg) {
858 f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
859 le32_to_cpu(cp_block->cp_pack_total_block_count));
860 goto invalid_cp;
861 }
862 pre_version = *version;
863
864 cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
865 err = get_checkpoint_version(sbi, cp_addr, &cp_block,
866 &cp_page_2, version);
867 if (err)
868 goto invalid_cp;
869 cur_version = *version;
870
871 if (cur_version == pre_version) {
872 *version = cur_version;
873 f2fs_put_page(cp_page_2, 1);
874 return cp_page_1;
875 }
876 f2fs_put_page(cp_page_2, 1);
877invalid_cp:
878 f2fs_put_page(cp_page_1, 1);
879 return NULL;
880}
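
/*
 * Why two reads (illustrative): a cp pack is trusted only if the
 * version stamped in its first block matches the one in its last block
 * (cp_addr + cp_pack_total_block_count - 1). A crash in the middle of
 * writing a pack leaves that pair mismatched, so the torn pack is
 * rejected here and mount falls back to the other pack.
 */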
881
882int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
883{
884 struct f2fs_checkpoint *cp_block;
885 struct f2fs_super_block *fsb = sbi->raw_super;
886 struct page *cp1, *cp2, *cur_page;
887 unsigned long blk_size = sbi->blocksize;
888 unsigned long long cp1_version = 0, cp2_version = 0;
889 unsigned long long cp_start_blk_no;
890 unsigned int cp_blks = 1 + __cp_payload(sbi);
891 block_t cp_blk_no;
892 int i;
893 int err;
894
895 sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
896 GFP_KERNEL);
897 if (!sbi->ckpt)
898 return -ENOMEM;
899 /*
900	 * Finding the valid cp block involves reading both
901	 * sets (cp pack 1 and cp pack 2)
902 */
903 cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
904 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
905
906 /* The second checkpoint pack should start at the next segment */
907 cp_start_blk_no += ((unsigned long long)1) <<
908 le32_to_cpu(fsb->log_blocks_per_seg);
909 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
910
911 if (cp1 && cp2) {
912 if (ver_after(cp2_version, cp1_version))
913 cur_page = cp2;
914 else
915 cur_page = cp1;
916 } else if (cp1) {
917 cur_page = cp1;
918 } else if (cp2) {
919 cur_page = cp2;
920 } else {
921 err = -EFSCORRUPTED;
922 goto fail_no_cp;
923 }
924
925 cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
926 memcpy(sbi->ckpt, cp_block, blk_size);
927
928 if (cur_page == cp1)
929 sbi->cur_cp_pack = 1;
930 else
931 sbi->cur_cp_pack = 2;
932
933 /* Sanity checking of checkpoint */
934 if (f2fs_sanity_check_ckpt(sbi)) {
935 err = -EFSCORRUPTED;
936 goto free_fail_no_cp;
937 }
938
939 if (cp_blks <= 1)
940 goto done;
941
942 cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
943 if (cur_page == cp2)
944 cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
945
946 for (i = 1; i < cp_blks; i++) {
947 void *sit_bitmap_ptr;
948 unsigned char *ckpt = (unsigned char *)sbi->ckpt;
949
950 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
951 if (IS_ERR(cur_page)) {
952 err = PTR_ERR(cur_page);
953 goto free_fail_no_cp;
954 }
955 sit_bitmap_ptr = page_address(cur_page);
956 memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
957 f2fs_put_page(cur_page, 1);
958 }
959done:
960 f2fs_put_page(cp1, 1);
961 f2fs_put_page(cp2, 1);
962 return 0;
963
964free_fail_no_cp:
965 f2fs_put_page(cp1, 1);
966 f2fs_put_page(cp2, 1);
967fail_no_cp:
968 kvfree(sbi->ckpt);
969 return err;
970}
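
/*
 * Example (illustrative): the two packs live in consecutive segments
 * after cp_blkaddr. If pack 1 holds version 7 and pack 2 holds version
 * 8, ver_after() picks pack 2 and cur_cp_pack becomes 2; the next
 * checkpoint then overwrites pack 1 with version 9, so the packs
 * alternate and one valid checkpoint always survives a crash.
 */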
971
972static void __add_dirty_inode(struct inode *inode, enum inode_type type)
973{
974 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
975 int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
976
977 if (is_inode_flag_set(inode, flag))
978 return;
979
980 set_inode_flag(inode, flag);
981 if (!f2fs_is_volatile_file(inode))
982 list_add_tail(&F2FS_I(inode)->dirty_list,
983 &sbi->inode_list[type]);
984 stat_inc_dirty_inode(sbi, type);
985}
986
987static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
988{
989 int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
990
991 if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
992 return;
993
994 list_del_init(&F2FS_I(inode)->dirty_list);
995 clear_inode_flag(inode, flag);
996 stat_dec_dirty_inode(F2FS_I_SB(inode), type);
997}
998
999void f2fs_update_dirty_page(struct inode *inode, struct page *page)
1000{
1001 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1002 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
1003
1004 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
1005 !S_ISLNK(inode->i_mode))
1006 return;
1007
1008 spin_lock(&sbi->inode_lock[type]);
1009 if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
1010 __add_dirty_inode(inode, type);
1011 inode_inc_dirty_pages(inode);
1012 spin_unlock(&sbi->inode_lock[type]);
1013
1014 f2fs_set_page_private(page, 0);
1015 f2fs_trace_pid(page);
1016}
1017
1018void f2fs_remove_dirty_inode(struct inode *inode)
1019{
1020 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1021 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
1022
1023 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
1024 !S_ISLNK(inode->i_mode))
1025 return;
1026
1027 if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
1028 return;
1029
1030 spin_lock(&sbi->inode_lock[type]);
1031 __remove_dirty_inode(inode, type);
1032 spin_unlock(&sbi->inode_lock[type]);
1033}
1034
1035int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
1036{
1037 struct list_head *head;
1038 struct inode *inode;
1039 struct f2fs_inode_info *fi;
1040 bool is_dir = (type == DIR_INODE);
1041 unsigned long ino = 0;
1042
1043 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
1044 get_pages(sbi, is_dir ?
1045 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
1046retry:
1047 if (unlikely(f2fs_cp_error(sbi)))
1048 return -EIO;
1049
1050 spin_lock(&sbi->inode_lock[type]);
1051
1052 head = &sbi->inode_list[type];
1053 if (list_empty(head)) {
1054 spin_unlock(&sbi->inode_lock[type]);
1055 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
1056 get_pages(sbi, is_dir ?
1057 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
1058 return 0;
1059 }
1060 fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
1061 inode = igrab(&fi->vfs_inode);
1062 spin_unlock(&sbi->inode_lock[type]);
1063 if (inode) {
1064 unsigned long cur_ino = inode->i_ino;
1065
1066 F2FS_I(inode)->cp_task = current;
1067
1068 filemap_fdatawrite(inode->i_mapping);
1069
1070 F2FS_I(inode)->cp_task = NULL;
1071
1072 iput(inode);
1073		/* We need to give cpu to other writers. */
1074 if (ino == cur_ino)
1075 cond_resched();
1076 else
1077 ino = cur_ino;
1078 } else {
1079 /*
1080		 * We should submit the bio, since several dentry pages
1081		 * in the freeing inode may still be under writeback.
1082 */
1083 f2fs_submit_merged_write(sbi, DATA);
1084 cond_resched();
1085 }
1086 goto retry;
1087}
1088
1089int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
1090{
1091 struct list_head *head = &sbi->inode_list[DIRTY_META];
1092 struct inode *inode;
1093 struct f2fs_inode_info *fi;
1094 s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
1095
1096 while (total--) {
1097 if (unlikely(f2fs_cp_error(sbi)))
1098 return -EIO;
1099
1100 spin_lock(&sbi->inode_lock[DIRTY_META]);
1101 if (list_empty(head)) {
1102 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1103 return 0;
1104 }
1105 fi = list_first_entry(head, struct f2fs_inode_info,
1106 gdirty_list);
1107 inode = igrab(&fi->vfs_inode);
1108 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1109 if (inode) {
1110 sync_inode_metadata(inode, 0);
1111
1112 /* it's on eviction */
1113 if (is_inode_flag_set(inode, FI_DIRTY_INODE))
1114 f2fs_update_inode_page(inode);
1115 iput(inode);
1116 }
1117 }
1118 return 0;
1119}
1120
1121static void __prepare_cp_block(struct f2fs_sb_info *sbi)
1122{
1123 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1124 struct f2fs_nm_info *nm_i = NM_I(sbi);
1125 nid_t last_nid = nm_i->next_scan_nid;
1126
1127 next_free_nid(sbi, &last_nid);
1128 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
1129 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
1130 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
1131 ckpt->next_free_nid = cpu_to_le32(last_nid);
1132}
1133
1134static bool __need_flush_quota(struct f2fs_sb_info *sbi)
1135{
1136 bool ret = false;
1137
1138 if (!is_journalled_quota(sbi))
1139 return false;
1140
1141 down_write(&sbi->quota_sem);
1142 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
1143 ret = false;
1144 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
1145 ret = false;
1146 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
1147 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1148 ret = true;
1149 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
1150 ret = true;
1151 }
1152 up_write(&sbi->quota_sem);
1153 return ret;
1154}
1155
1156/*
1157 * Freeze all the FS-operations for checkpoint.
1158 */
1159static int block_operations(struct f2fs_sb_info *sbi)
1160{
1161 struct writeback_control wbc = {
1162 .sync_mode = WB_SYNC_ALL,
1163 .nr_to_write = LONG_MAX,
1164 .for_reclaim = 0,
1165 };
1166 struct blk_plug plug;
1167 int err = 0, cnt = 0;
1168
1169 blk_start_plug(&plug);
1170
1171retry_flush_quotas:
1172 f2fs_lock_all(sbi);
1173 if (__need_flush_quota(sbi)) {
1174 int locked;
1175
1176 if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
1177 set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
1178 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1179 goto retry_flush_dents;
1180 }
1181 f2fs_unlock_all(sbi);
1182
1183		/* trylock can only fail during mount/umount/freeze/quotactl */
1184 locked = down_read_trylock(&sbi->sb->s_umount);
1185 f2fs_quota_sync(sbi->sb, -1);
1186 if (locked)
1187 up_read(&sbi->sb->s_umount);
1188 cond_resched();
1189 goto retry_flush_quotas;
1190 }
1191
1192retry_flush_dents:
1193 /* write all the dirty dentry pages */
1194 if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
1195 f2fs_unlock_all(sbi);
1196 err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
1197 if (err)
1198 goto out;
1199 cond_resched();
1200 goto retry_flush_quotas;
1201 }
1202
1203 /*
1204 * POR: we should ensure that there are no dirty node pages
1205 * until finishing nat/sit flush. inode->i_blocks can be updated.
1206 */
1207 down_write(&sbi->node_change);
1208
1209 if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
1210 up_write(&sbi->node_change);
1211 f2fs_unlock_all(sbi);
1212 err = f2fs_sync_inode_meta(sbi);
1213 if (err)
1214 goto out;
1215 cond_resched();
1216 goto retry_flush_quotas;
1217 }
1218
1219retry_flush_nodes:
1220 down_write(&sbi->node_write);
1221
1222 if (get_pages(sbi, F2FS_DIRTY_NODES)) {
1223 up_write(&sbi->node_write);
1224 atomic_inc(&sbi->wb_sync_req[NODE]);
1225 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
1226 atomic_dec(&sbi->wb_sync_req[NODE]);
1227 if (err) {
1228 up_write(&sbi->node_change);
1229 f2fs_unlock_all(sbi);
1230 goto out;
1231 }
1232 cond_resched();
1233 goto retry_flush_nodes;
1234 }
1235
1236 /*
1237 * sbi->node_change is used only for AIO write_begin path which produces
1238 * dirty node blocks and some checkpoint values by block allocation.
1239 */
1240 __prepare_cp_block(sbi);
1241 up_write(&sbi->node_change);
1242out:
1243 blk_finish_plug(&plug);
1244 return err;
1245}
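
/*
 * Shape of the freeze above (illustrative): every flusher drops the
 * locks before it writes and then restarts from the top, since new
 * dirty data may have appeared meanwhile:
 *
 *	quotas -> f2fs_quota_sync() with f2fs_lock_all() released
 *	dents  -> f2fs_sync_dirty_inodes(DIR_INODE)
 *	imeta  -> f2fs_sync_inode_meta()
 *	nodes  -> f2fs_sync_node_pages(), retrying node_write only
 *
 * On success the caller leaves with f2fs_lock_all() and node_write
 * still held; both are released in unblock_operations().
 */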
1246
1247static void unblock_operations(struct f2fs_sb_info *sbi)
1248{
1249 up_write(&sbi->node_write);
1250 f2fs_unlock_all(sbi);
1251}
1252
1253void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
1254{
1255 DEFINE_WAIT(wait);
1256
1257 for (;;) {
1258 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
1259
1260 if (!get_pages(sbi, F2FS_WB_CP_DATA))
1261 break;
1262
1263 if (unlikely(f2fs_cp_error(sbi)))
1264 break;
1265
1266 io_schedule_timeout(5*HZ);
1267 }
1268 finish_wait(&sbi->cp_wait, &wait);
1269}
1270
1271static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1272{
1273 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
1274 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1275 unsigned long flags;
1276
1277 spin_lock_irqsave(&sbi->cp_lock, flags);
1278
1279 if ((cpc->reason & CP_UMOUNT) &&
1280 le32_to_cpu(ckpt->cp_pack_total_block_count) >
1281 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
1282 disable_nat_bits(sbi, false);
1283
1284 if (cpc->reason & CP_TRIMMED)
1285 __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
1286 else
1287 __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
1288
1289 if (cpc->reason & CP_UMOUNT)
1290 __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
1291 else
1292 __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
1293
1294 if (cpc->reason & CP_FASTBOOT)
1295 __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
1296 else
1297 __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
1298
1299 if (orphan_num)
1300 __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
1301 else
1302 __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
1303
1304 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1305 is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
1306 __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
1307
1308 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1309 __set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
1310 else
1311 __clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);
1312
1313 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
1314 __set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
1315 else
1316 __clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
1317
1318 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
1319 __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
1320 else
1321 __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
1322
1323 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
1324 __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
1325
1326 /* set this flag to activate crc|cp_ver for recovery */
1327 __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
1328 __clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
1329
1330 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1331}
1332
1333static void commit_checkpoint(struct f2fs_sb_info *sbi,
1334 void *src, block_t blk_addr)
1335{
1336 struct writeback_control wbc = {
1337 .for_reclaim = 0,
1338 };
1339
1340 /*
1341 * pagevec_lookup_tag and lock_page again will take
1342 * some extra time. Therefore, f2fs_update_meta_pages and
1343 * f2fs_sync_meta_pages are combined in this function.
1344 */
1345 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
1346 int err;
1347
1348 f2fs_wait_on_page_writeback(page, META, true, true);
1349
1350 memcpy(page_address(page), src, PAGE_SIZE);
1351
1352 set_page_dirty(page);
1353 if (unlikely(!clear_page_dirty_for_io(page)))
1354 f2fs_bug_on(sbi, 1);
1355
1356 /* writeout cp pack 2 page */
1357 err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
1358 if (unlikely(err && f2fs_cp_error(sbi))) {
1359 f2fs_put_page(page, 1);
1360 return;
1361 }
1362
1363 f2fs_bug_on(sbi, err);
1364 f2fs_put_page(page, 0);
1365
1366 /* submit checkpoint (with barrier if NOBARRIER is not set) */
1367 f2fs_submit_merged_write(sbi, META_FLUSH);
1368}
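
/*
 * Ordering note (illustrative): every other block of the cp pack is
 * already queued by the time this runs; writing this last page with
 * META_FLUSH (pre-flush plus FUA unless NOBARRIER is set) is what makes
 * the new pack valid, because validate_checkpoint() only accepts a pack
 * whose first and last blocks carry the same version.
 */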
1369
1370static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1371{
1372 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1373 struct f2fs_nm_info *nm_i = NM_I(sbi);
1374 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
1375 block_t start_blk;
1376 unsigned int data_sum_blocks, orphan_blocks;
1377 __u32 crc32 = 0;
1378 int i;
1379 int cp_payload_blks = __cp_payload(sbi);
1380 struct super_block *sb = sbi->sb;
1381 struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
1382 u64 kbytes_written;
1383 int err;
1384
1385 /* Flush all the NAT/SIT pages */
1386 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
1387 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
1388 !f2fs_cp_error(sbi));
1389
1390 /*
1391 * modify checkpoint
1392 * version number is already updated
1393 */
1394 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
1395 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
1396 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
1397 ckpt->cur_node_segno[i] =
1398 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
1399 ckpt->cur_node_blkoff[i] =
1400 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
1401 ckpt->alloc_type[i + CURSEG_HOT_NODE] =
1402 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
1403 }
1404 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
1405 ckpt->cur_data_segno[i] =
1406 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
1407 ckpt->cur_data_blkoff[i] =
1408 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
1409 ckpt->alloc_type[i + CURSEG_HOT_DATA] =
1410 curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
1411 }
1412
1413 /* 2 cp + n data seg summary + orphan inode blocks */
1414 data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
1415 spin_lock_irqsave(&sbi->cp_lock, flags);
1416 if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
1417 __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
1418 else
1419 __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
1420 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1421
1422 orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
1423 ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
1424 orphan_blocks);
1425
1426 if (__remain_node_summaries(cpc->reason))
1427 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
1428 cp_payload_blks + data_sum_blocks +
1429 orphan_blocks + NR_CURSEG_NODE_TYPE);
1430 else
1431 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
1432 cp_payload_blks + data_sum_blocks +
1433 orphan_blocks);
1434
1435 /* update ckpt flag for checkpoint */
1436 update_ckpt_flags(sbi, cpc);
1437
1438 /* update SIT/NAT bitmap */
1439 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
1440 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
1441
1442 crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
1443 *((__le32 *)((unsigned char *)ckpt +
1444 le32_to_cpu(ckpt->checksum_offset)))
1445 = cpu_to_le32(crc32);
1446
1447 start_blk = __start_cp_next_addr(sbi);
1448
1449 /* write nat bits */
1450 if (enabled_nat_bits(sbi, cpc)) {
1451 __u64 cp_ver = cur_cp_version(ckpt);
1452 block_t blk;
1453
1454 cp_ver |= ((__u64)crc32 << 32);
1455 *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
1456
1457 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
1458 for (i = 0; i < nm_i->nat_bits_blocks; i++)
1459 f2fs_update_meta_page(sbi, nm_i->nat_bits +
1460 (i << F2FS_BLKSIZE_BITS), blk + i);
1461 }
1462
1463 /* write out checkpoint buffer at block 0 */
1464 f2fs_update_meta_page(sbi, ckpt, start_blk++);
1465
1466 for (i = 1; i < 1 + cp_payload_blks; i++)
1467 f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
1468 start_blk++);
1469
1470 if (orphan_num) {
1471 write_orphan_inodes(sbi, start_blk);
1472 start_blk += orphan_blocks;
1473 }
1474
1475 f2fs_write_data_summaries(sbi, start_blk);
1476 start_blk += data_sum_blocks;
1477
1478 /* Record write statistics in the hot node summary */
1479 kbytes_written = sbi->kbytes_written;
1480 if (sb->s_bdev->bd_part)
1481 kbytes_written += BD_PART_WRITTEN(sbi);
1482
1483 seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
1484
1485 if (__remain_node_summaries(cpc->reason)) {
1486 f2fs_write_node_summaries(sbi, start_blk);
1487 start_blk += NR_CURSEG_NODE_TYPE;
1488 }
1489
1490 /* update user_block_counts */
1491 sbi->last_valid_block_count = sbi->total_valid_block_count;
1492 percpu_counter_set(&sbi->alloc_valid_block_count, 0);
1493
1494	/* Here, we have one bio holding the CP pack, except the cp pack 2 page */
1495 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
1496 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
1497 !f2fs_cp_error(sbi));
1498
1499 /* wait for previous submitted meta pages writeback */
1500 f2fs_wait_on_all_pages_writeback(sbi);
1501
1502 /* flush all device cache */
1503 err = f2fs_flush_device_cache(sbi);
1504 if (err)
1505 return err;
1506
1507	/* flush the cp pack 2 page with a barrier, when the device supports it */
1508 commit_checkpoint(sbi, ckpt, start_blk);
1509 f2fs_wait_on_all_pages_writeback(sbi);
1510
1511 /*
1512	 * invalidate intermediate page cache borrowed from the meta inode
1513	 * which is used for migration of encrypted inodes' blocks.
1514 */
1515 if (f2fs_sb_has_encrypt(sbi))
1516 invalidate_mapping_pages(META_MAPPING(sbi),
1517 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
1518
1519 f2fs_release_ino_entry(sbi, false);
1520
1521 f2fs_reset_fsync_node_info(sbi);
1522
1523 clear_sbi_flag(sbi, SBI_IS_DIRTY);
1524 clear_sbi_flag(sbi, SBI_NEED_CP);
1525 clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
1526
1527 spin_lock(&sbi->stat_lock);
1528 sbi->unusable_block_count = 0;
1529 spin_unlock(&sbi->stat_lock);
1530
1531 __set_cp_next_pack(sbi);
1532
1533 /*
1534 * redirty superblock if metadata like node page or inode cache is
1535 * updated during writing checkpoint.
1536 */
1537 if (get_pages(sbi, F2FS_DIRTY_NODES) ||
1538 get_pages(sbi, F2FS_DIRTY_IMETA))
1539 set_sbi_flag(sbi, SBI_IS_DIRTY);
1540
1541 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
1542
1543 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
1544}
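
/*
 * On-disk shape of the cp pack written above (illustrative):
 *
 *	+---------------------+  start_blk
 *	| checkpoint block    |  ckpt, crc at checksum_offset
 *	| cp payload blocks   |  __cp_payload(sbi) blocks
 *	| orphan blocks       |  GET_ORPHAN_BLOCKS(orphan_num)
 *	| data summaries      |  compact or full, data_sum_blocks
 *	| node summaries      |  only if __remain_node_summaries()
 *	| checkpoint block    |  copy of ckpt, via commit_checkpoint()
 *	+---------------------+
 *
 * nat_bits, when enabled, occupy the tail blocks of the same segment.
 */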
1545
1546/*
1547 * We guarantee that this checkpoint procedure will not fail.
1548 */
1549int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1550{
1551 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1552 unsigned long long ckpt_ver;
1553 int err = 0;
1554
1555 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
1556 return -EROFS;
1557
1558 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1559 if (cpc->reason != CP_PAUSE)
1560 return 0;
1561 f2fs_warn(sbi, "Start checkpoint disabled!");
1562 }
1563 mutex_lock(&sbi->cp_mutex);
1564
1565 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
1566 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
1567 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
1568 goto out;
1569 if (unlikely(f2fs_cp_error(sbi))) {
1570 err = -EIO;
1571 goto out;
1572 }
1573
1574 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
1575
1576 err = block_operations(sbi);
1577 if (err)
1578 goto out;
1579
1580 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
1581
1582 f2fs_flush_merged_writes(sbi);
1583
1584 /* this is the case of multiple fstrims without any changes */
1585 if (cpc->reason & CP_DISCARD) {
1586 if (!f2fs_exist_trim_candidates(sbi, cpc)) {
1587 unblock_operations(sbi);
1588 goto out;
1589 }
1590
1591 if (NM_I(sbi)->dirty_nat_cnt == 0 &&
1592 SIT_I(sbi)->dirty_sentries == 0 &&
1593 prefree_segments(sbi) == 0) {
1594 f2fs_flush_sit_entries(sbi, cpc);
1595 f2fs_clear_prefree_segments(sbi, cpc);
1596 unblock_operations(sbi);
1597 goto out;
1598 }
1599 }
1600
1601 /*
1602 * update checkpoint pack index
1603 * Increase the version number so that
1604	 * SIT entries and seg summaries are written at the correct place
1605 */
1606 ckpt_ver = cur_cp_version(ckpt);
1607 ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
1608
1609 /* write cached NAT/SIT entries to NAT/SIT area */
1610 err = f2fs_flush_nat_entries(sbi, cpc);
1611 if (err)
1612 goto stop;
1613
1614 f2fs_flush_sit_entries(sbi, cpc);
1615
1616 /* unlock all the fs_lock[] in do_checkpoint() */
1617 err = do_checkpoint(sbi, cpc);
1618 if (err)
1619 f2fs_release_discard_addrs(sbi);
1620 else
1621 f2fs_clear_prefree_segments(sbi, cpc);
1622stop:
1623 unblock_operations(sbi);
1624 stat_inc_cp_count(sbi->stat_info);
1625
1626 if (cpc->reason & CP_RECOVERY)
1627 f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
1628
1629 /* do checkpoint periodically */
1630 f2fs_update_time(sbi, CP_TIME);
1631 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
1632out:
1633 mutex_unlock(&sbi->cp_mutex);
1634 return err;
1635}
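
/*
 * Typical invocation (illustrative sketch): callers build a cp_control
 * and rely on cp_mutex above to serialize concurrent checkpoints:
 *
 *	struct cp_control cpc = {
 *		.reason = CP_SYNC,
 *	};
 *
 *	err = f2fs_write_checkpoint(sbi, &cpc);
 *
 * Umount passes CP_UMOUNT, fstrim passes CP_DISCARD plus a trim range,
 * and recovery completion passes CP_RECOVERY.
 */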
1636
1637void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
1638{
1639 int i;
1640
1641 for (i = 0; i < MAX_INO_ENTRY; i++) {
1642 struct inode_management *im = &sbi->im[i];
1643
1644 INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
1645 spin_lock_init(&im->ino_lock);
1646 INIT_LIST_HEAD(&im->ino_list);
1647 im->ino_num = 0;
1648 }
1649
1650 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
1651 NR_CURSEG_TYPE - __cp_payload(sbi)) *
1652 F2FS_ORPHANS_PER_BLOCK;
1653}
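
/*
 * Worked example (illustrative): with 512 blocks per segment, no extra
 * cp payload, F2FS_CP_PACKS == 2 and NR_CURSEG_TYPE == 6, one cp pack
 * leaves 512 - 2 - 6 = 504 blocks for orphans, i.e. max_orphans ==
 * 504 * F2FS_ORPHANS_PER_BLOCK == 514080 orphan inodes per checkpoint.
 */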
1654
1655int __init f2fs_create_checkpoint_caches(void)
1656{
1657 ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
1658 sizeof(struct ino_entry));
1659 if (!ino_entry_slab)
1660 return -ENOMEM;
1661 f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
1662 sizeof(struct inode_entry));
1663 if (!f2fs_inode_entry_slab) {
1664 kmem_cache_destroy(ino_entry_slab);
1665 return -ENOMEM;
1666 }
1667 return 0;
1668}
1669
1670void f2fs_destroy_checkpoint_caches(void)
1671{
1672 kmem_cache_destroy(ino_entry_slab);
1673 kmem_cache_destroy(f2fs_inode_entry_slab);
1674}