/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *orphan_entry_slab;
static struct kmem_cache *inode_entry_slab;

/*
 * We guarantee no failure on the returned page.
 */
struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page) {
		cond_resched();
		goto repeat;
	}

	SetPageUptodate(page);
	return page;
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	if (f2fs_submit_page_bio(sbi, page, index,
				READ_SYNC | REQ_META | REQ_PRIO))
		goto repeat;

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
out:
	mark_page_accessed(page);
	return page;
}

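/*
 * Return the number of valid blocks for the given meta area type: the
 * total count of NAT or SIT blocks, or 0 for the SSA/CP areas, whose
 * bounds are handled by the callers themselves.
 */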
inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
{
	switch (type) {
	case META_NAT:
		return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
	case META_SIT:
		return SIT_BLK_CNT(sbi);
	case META_SSA:
	case META_CP:
		return 0;
	default:
		BUG();
	}
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
{
	block_t prev_blk_addr = 0;
	struct page *page;
	int blkno = start;
	int max_blks = get_max_meta_blks(sbi, type);

	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; nrpages-- > 0; blkno++) {
		block_t blk_addr;

		switch (type) {
		case META_NAT:
			/* get nat block addr */
			if (unlikely(blkno >= max_blks))
				blkno = 0;
			blk_addr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			if (unlikely(blkno >= max_blks))
				goto out;
			blk_addr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			if (blkno != start && prev_blk_addr + 1 != blk_addr)
				goto out;
			prev_blk_addr = blk_addr;
			break;
		case META_SSA:
		case META_CP:
			/* get ssa/cp block addr */
			blk_addr = blkno;
			break;
		default:
			BUG();
		}

		page = grab_cache_page(META_MAPPING(sbi), blk_addr);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}

		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}

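/*
 * Write back a single dirty meta page.  The page is redirtied while
 * recovery is in progress or when called for reclaim, and is dropped
 * without being written if a previous checkpoint error was recorded.
 */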
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (wbc->for_reclaim)
		goto redirty_out;

	/* should not write any meta pages if an IO error has occurred */
	if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
		goto no_write;

	f2fs_wait_on_page_writeback(page, META);
	write_meta_page(sbi, page);
no_write:
	dec_page_count(sbi, F2FS_DIRTY_META);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_META);
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff, written;

	/* collect a number of dirty meta pages and write them together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if mounting failed, skip writing meta pages */
	mutex_lock(&sbi->cp_mutex);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	return 0;
}

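/*
 * Collect dirty meta pages with pagevec_lookup_tag() and write up to
 * @nr_to_write of them, then flush the merged bio.  Returns the number
 * of pages actually written.
 */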
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (unlikely(nr_pages == 0))
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (f2fs_write_meta_page(page, &wbc)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

	return nwritten;
}

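/*
 * Mark a meta page dirty, accounting it in the F2FS_DIRTY_META counter.
 * Returns 1 if the page was newly dirtied, 0 if it was dirty already.
 */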
static int f2fs_set_meta_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, META);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_META);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
};

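/*
 * Reserve a slot for one more orphan inode entry; fails with -ENOSPC
 * once max_orphans entries are already reserved.
 */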
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	int err = 0;

	spin_lock(&sbi->orphan_inode_lock);
	if (unlikely(sbi->n_orphans >= sbi->max_orphans))
		err = -ENOSPC;
	else
		sbi->n_orphans++;
	spin_unlock(&sbi->orphan_inode_lock);

	return err;
}

void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->orphan_inode_lock);
	f2fs_bug_on(sbi->n_orphans == 0);
	sbi->n_orphans--;
	spin_unlock(&sbi->orphan_inode_lock);
}

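/*
 * Record @ino in the in-memory orphan list, keeping the list sorted by
 * inode number and ignoring duplicates.  A typical caller pairs this
 * with acquire_orphan_inode(), e.g. (a sketch; error handling at the
 * call site is elided):
 *
 *	err = acquire_orphan_inode(sbi);
 *	if (!err)
 *		add_orphan_inode(sbi, inode->i_ino);
 */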
void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head;
	struct orphan_inode_entry *new, *orphan;

	new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
	new->ino = ino;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, new);
			return;
		}

		if (orphan->ino > ino)
			break;
	}

	/* add the new orphan entry into the list, which is sorted by ino */
	list_add_tail(&new->list, &orphan->list);
	spin_unlock(&sbi->orphan_inode_lock);
}

void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head;
	struct orphan_inode_entry *orphan;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			list_del(&orphan->list);
			f2fs_bug_on(sbi->n_orphans == 0);
			sbi->n_orphans--;
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, orphan);
			return;
		}
	}
	spin_unlock(&sbi->orphan_inode_lock);
}

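/*
 * Drop the last link of an orphan inode; its data is truncated and the
 * inode freed during the final iput().
 */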
static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode = f2fs_iget(sbi->sb, ino);
	f2fs_bug_on(IS_ERR(inode));
	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);
}

void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blkaddr, i, j;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return;

	sbi->por_doing = true;
	start_blk = __start_cp_addr(sbi) + 1;
	orphan_blkaddr = __start_sum_addr(sbi) - 1;

	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

	for (i = 0; i < orphan_blkaddr; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			recover_orphan_inode(sbi, ino);
		}
		f2fs_put_page(page, 1);
	}
	/* clear the orphan flag */
	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
	sbi->por_doing = false;
	return;
}

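/*
 * Pack the in-memory orphan list into consecutive meta pages starting
 * at @start_blk, F2FS_ORPHANS_PER_BLOCK entries per block, and mark
 * the pages dirty for the checkpoint writeback.
 */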
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index;
	unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans +
		(F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
	struct page *page = NULL;
	struct orphan_inode_entry *orphan = NULL;

	for (index = 0; index < orphan_blocks; index++)
		grab_meta_page(sbi, start_blk + index);

	index = 1;
	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;

	/* loop over each orphan inode entry and write them into journal blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = find_get_page(META_MAPPING(sbi), start_blk++);
			f2fs_bug_on(!page);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
			f2fs_put_page(page, 0);
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * this orphan block is full of 1020 entries,
			 * so we need to flush it and bring another one
			 * into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}

	spin_unlock(&sbi->orphan_inode_lock);
}

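/*
 * Read the first and last blocks of a CP pack, verify each checksum,
 * and return the head page only when both copies carry the same
 * version number; otherwise return NULL.
 */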
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1, *cp_page_2 = NULL;
	unsigned long blk_size = sbi->blocksize;
	struct f2fs_checkpoint *cp_block;
	unsigned long long cur_version = 0, pre_version = 0;
	size_t crc_offset;
	__u32 crc = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_meta_page(sbi, cp_addr);

	/* get the version number */
	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp1;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp1;

	pre_version = cur_cp_version(cp_block);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	cp_page_2 = get_meta_page(sbi, cp_addr);

	cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
	crc_offset = le32_to_cpu(cp_block->checksum_offset);
	if (crc_offset >= blk_size)
		goto invalid_cp2;

	crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset)));
	if (!f2fs_crc_valid(crc, cp_block, crc_offset))
		goto invalid_cp2;

	cur_version = cur_cp_version(cp_block);

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;

	sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}

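/*
 * Append @new to the dirty directory list unless the inode is already
 * there.  Must be called with dir_inode_lock held.
 */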
static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct list_head *head = &sbi->dir_inode_list;
	struct dir_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (unlikely(entry->inode == inode))
			return -EEXIST;

	list_add_tail(&new->list, head);
	stat_inc_dirty_dir(sbi);
	return 0;
}

void set_dirty_dir_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new;
	int ret = 0;

	if (!S_ISDIR(inode->i_mode))
		return;

	new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	ret = __add_dirty_inode(inode, new);
	inode_inc_dirty_dents(inode);
	SetPagePrivate(page);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void add_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new =
			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	int ret = 0;

	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	ret = __add_dirty_inode(inode, new);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void remove_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct list_head *head;
	struct dir_inode_entry *entry;

	if (!S_ISDIR(inode->i_mode))
		return;

	spin_lock(&sbi->dir_inode_lock);
	if (get_dirty_dents(inode)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}

	head = &sbi->dir_inode_list;
	list_for_each_entry(entry, head, list) {
		if (entry->inode == inode) {
			list_del(&entry->list);
			stat_dec_dirty_dir(sbi);
			spin_unlock(&sbi->dir_inode_lock);
			kmem_cache_free(inode_entry_slab, entry);
			goto done;
		}
	}
	spin_unlock(&sbi->dir_inode_lock);

done:
	/* Only from the recovery routine */
	if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
		clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
		iput(inode);
	}
}

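/*
 * Look up a dirty directory inode by inode number.  The pointer is
 * returned without taking a reference, so callers must take care of
 * the inode's lifetime themselves.
 */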
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head;
	struct inode *inode = NULL;
	struct dir_inode_entry *entry;

	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	list_for_each_entry(entry, head, list) {
		if (entry->inode->i_ino == ino) {
			inode = entry->inode;
			break;
		}
	}
	spin_unlock(&sbi->dir_inode_lock);
	return inode;
}

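/*
 * Flush the data pages of every inode on the dirty directory list,
 * looping until the list drains.
 */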
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{
	struct list_head *head;
	struct dir_inode_entry *entry;
	struct inode *inode;
retry:
	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	if (list_empty(head)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}
	entry = list_entry(head->next, struct dir_inode_entry, list);
	inode = igrab(entry->inode);
	spin_unlock(&sbi->dir_inode_lock);
	if (inode) {
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * may still be under writeback in the inode being freed.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
	goto retry;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static void block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		sync_dirty_dir_inodes(sbi);
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until the nat/sit flush is finished.
	 */
retry_flush_nodes:
	mutex_lock(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		mutex_unlock(&sbi->node_write);
		sync_node_pages(sbi, 0, &wbc);
		goto retry_flush_nodes;
	}
	blk_finish_plug(&plug);
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	mutex_unlock(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

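/*
 * Sleep on cp_wait until the number of pages under writeback drops to
 * zero.
 */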
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WRITEBACK))
			break;

		io_schedule();
	}
	finish_wait(&sbi->cp_wait, &wait);
}

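/*
 * Write one complete CP pack: flush the remaining dirty meta pages,
 * fill in the checkpoint block, then lay out the head CP block, orphan
 * blocks, data (and, on umount, node) summaries, and the mirrored tail
 * CP block, and finally wait for all of it to reach the disk.
 */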
static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	nid_t last_nid = 0;
	block_t start_blk;
	struct page *cp_page;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	void *kaddr;
	int i;

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META))
		sync_meta_pages(sbi, META, LONG_MAX);

	next_free_nid(sbi, &last_nid);

	/*
	 * modify checkpoint
	 * version number is already updated
	 */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < 3; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < 3; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = npages_for_summary_flush(sbi);
	if (data_sum_blocks < 3)
		set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);

	orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
					/ F2FS_ORPHANS_PER_BLOCK;
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);

	if (is_umount) {
		set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
			data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
	} else {
		clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
		ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
			data_sum_blocks + orphan_blocks);
	}

	if (sbi->n_orphans)
		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_addr(sbi);

	/* write out the checkpoint buffer at block 0 */
	cp_page = grab_meta_page(sbi, start_blk++);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	if (sbi->n_orphans) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;
	if (is_umount) {
		write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* write out the mirrored checkpoint block at the end of the pack */
	cp_page = grab_meta_page(sbi, start_blk);
	kaddr = page_address(cp_page);
	memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
	set_page_dirty(cp_page);
	f2fs_put_page(cp_page, 1);

	/* wait for previously submitted node/meta page writeback */
	wait_on_all_pages_writeback(sbi);

	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
	filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	/* Here, we have only one bio carrying the CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);

	if (unlikely(!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) {
		clear_prefree_segments(sbi);
		F2FS_RESET_SB_DIRT(sbi);
	}
}

/*
 * We guarantee that this checkpoint procedure will not fail.
 */
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");

	mutex_lock(&sbi->cp_mutex);
	block_operations(sbi);

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");

	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);

	/*
	 * update the checkpoint pack index: increase the version number
	 * so that SIT entries and seg summaries are written in the
	 * correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to the NAT/SIT area */
	flush_nat_entries(sbi);
	flush_sit_entries(sbi);

	/* write out the checkpoint pack */
	do_checkpoint(sbi, is_umount);

	unblock_operations(sbi);
	mutex_unlock(&sbi->cp_mutex);

	stat_inc_cp_count(sbi->stat_info);
	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
}

void init_orphan_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->orphan_inode_lock);
	INIT_LIST_HEAD(&sbi->orphan_inode_list);
	sbi->n_orphans = 0;
	/*
	 * Considering 512 blocks in a segment, 8 blocks are needed for
	 * the cp and log segment summaries; the remaining blocks keep
	 * orphan entries.  With one reserved segment for the cp pack we
	 * can have at most 1020 * 504 orphan entries.
	 */
	sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
				* F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
{
	orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
			sizeof(struct orphan_inode_entry));
	if (!orphan_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
			sizeof(struct dir_inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(orphan_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(orphan_entry_slab);
	kmem_cache_destroy(inode_entry_slab);
}