v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * fs/f2fs/recovery.c
  4 *
  5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  6 *             http://www.samsung.com/
  7 */
  8#include <linux/fs.h>
  9#include <linux/f2fs_fs.h>
 10#include "f2fs.h"
 11#include "node.h"
 12#include "segment.h"
 13
 14/*
 15 * Roll forward recovery scenarios.
 16 *
 17 * [Term] F: fsync_mark, D: dentry_mark
 18 *
 19 * 1. inode(x) | CP | inode(x) | dnode(F)
 20 * -> Update the latest inode(x).
 21 *
 22 * 2. inode(x) | CP | inode(F) | dnode(F)
 23 * -> No problem.
 24 *
 25 * 3. inode(x) | CP | dnode(F) | inode(x)
 26 * -> Recover to the latest dnode(F), and drop the last inode(x)
 27 *
 28 * 4. inode(x) | CP | dnode(F) | inode(F)
 29 * -> No problem.
 30 *
 31 * 5. CP | inode(x) | dnode(F)
 32 * -> The inode(DF) was missing. Should drop this dnode(F).
 33 *
 34 * 6. CP | inode(DF) | dnode(F)
 35 * -> No problem.
 36 *
 37 * 7. CP | dnode(F) | inode(DF)
 38 * -> If f2fs_iget fails, then goto next to find inode(DF).
 39 *
 40 * 8. CP | dnode(F) | inode(x)
 41 * -> If f2fs_iget fails, then goto next to find inode(DF).
 42 *    But it will fail due to no inode(DF).
 43 */
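
Editor's note: the scenario table above reduces to one question per fsync'd dnode: does its inode exist at the last checkpoint (scenarios 1-4, where f2fs_iget() succeeds), or does an inode block carrying the dentry mark appear later in the log (scenarios 6-7)? The standalone userspace sketch below restates that rule for illustration only; may_recover() and its two flags are invented names, not f2fs symbols.

/* Minimal sketch of the decision behind scenarios 1-8 above. */
#include <stdbool.h>
#include <stdio.h>

static bool may_recover(bool inode_at_last_cp, bool inode_df_in_log)
{
	/* 1-4: the inode existed at CP, so f2fs_iget() can load it.   */
	/* 6-7: an inode(DF) follows in the log; recovery rebuilds it. */
	/* 5, 8: neither holds, so the dnode(F) must be dropped.       */
	return inode_at_last_cp || inode_df_in_log;
}

int main(void)
{
	printf("scenario 1: %d\n", may_recover(true, false));	/* 1: recover */
	printf("scenario 5: %d\n", may_recover(false, false));	/* 0: drop    */
	printf("scenario 6: %d\n", may_recover(false, true));	/* 1: recover */
	return 0;
}
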
 44
 45static struct kmem_cache *fsync_entry_slab;
 46
 47bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
 48{
 49	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
 50
 51	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
 52		return false;
 53	return true;
 54}
 55
 56static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
 57								nid_t ino)
 58{
 59	struct fsync_inode_entry *entry;
 60
 61	list_for_each_entry(entry, head, list)
 62		if (entry->inode->i_ino == ino)
 63			return entry;
 64
 65	return NULL;
 66}
 67
 68static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
 69			struct list_head *head, nid_t ino, bool quota_inode)
 70{
 71	struct inode *inode;
 72	struct fsync_inode_entry *entry;
 73	int err;
 74
 75	inode = f2fs_iget_retry(sbi->sb, ino);
 76	if (IS_ERR(inode))
 77		return ERR_CAST(inode);
 78
 79	err = dquot_initialize(inode);
 80	if (err)
 81		goto err_out;
 82
 83	if (quota_inode) {
 84		err = dquot_alloc_inode(inode);
 85		if (err)
 86			goto err_out;
 87	}
 88
 89	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
 90	entry->inode = inode;
 91	list_add_tail(&entry->list, head);
 92
 93	return entry;
 94err_out:
 95	iput(inode);
 96	return ERR_PTR(err);
 97}
 98
 99static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
100{
101	if (drop) {
102		/* inode should not be recovered, drop it */
103		f2fs_inode_synced(entry->inode);
104	}
105	iput(entry->inode);
106	list_del(&entry->list);
107	kmem_cache_free(fsync_entry_slab, entry);
108}
109
110static int init_recovered_filename(const struct inode *dir,
111				   struct f2fs_inode *raw_inode,
112				   struct f2fs_filename *fname,
113				   struct qstr *usr_fname)
114{
115	int err;
116
117	memset(fname, 0, sizeof(*fname));
118	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
119	fname->disk_name.name = raw_inode->i_name;
120
121	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
122		return -ENAMETOOLONG;
123
124	if (!IS_ENCRYPTED(dir)) {
125		usr_fname->name = fname->disk_name.name;
126		usr_fname->len = fname->disk_name.len;
127		fname->usr_fname = usr_fname;
128	}
129
130	/* Compute the hash of the filename */
131	if (IS_CASEFOLDED(dir)) {
132		err = f2fs_init_casefolded_name(dir, fname);
133		if (err)
134			return err;
135		f2fs_hash_filename(dir, fname);
136#ifdef CONFIG_UNICODE
137		/* Case-sensitive match is fine for recovery */
138		kfree(fname->cf_name.name);
139		fname->cf_name.name = NULL;
140#endif
141	} else {
142		f2fs_hash_filename(dir, fname);
143	}
144	return 0;
145}
146
147static int recover_dentry(struct inode *inode, struct page *ipage,
148						struct list_head *dir_list)
149{
150	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
151	nid_t pino = le32_to_cpu(raw_inode->i_pino);
152	struct f2fs_dir_entry *de;
153	struct f2fs_filename fname;
154	struct qstr usr_fname;
155	struct page *page;
156	struct inode *dir, *einode;
157	struct fsync_inode_entry *entry;
158	int err = 0;
159	char *name;
160
161	entry = get_fsync_inode(dir_list, pino);
162	if (!entry) {
163		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
164							pino, false);
165		if (IS_ERR(entry)) {
166			dir = ERR_CAST(entry);
167			err = PTR_ERR(entry);
168			goto out;
169		}
170	}
171
172	dir = entry->inode;
173	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
174	if (err)
175		goto out;
176retry:
177	de = __f2fs_find_entry(dir, &fname, &page);
178	if (de && inode->i_ino == le32_to_cpu(de->ino))
179		goto out_put;
180
181	if (de) {
182		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
183		if (IS_ERR(einode)) {
184			WARN_ON(1);
185			err = PTR_ERR(einode);
186			if (err == -ENOENT)
187				err = -EEXIST;
188			goto out_put;
189		}
190
191		err = dquot_initialize(einode);
192		if (err) {
193			iput(einode);
194			goto out_put;
195		}
196
197		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
198		if (err) {
199			iput(einode);
200			goto out_put;
201		}
202		f2fs_delete_entry(de, page, dir, einode);
203		iput(einode);
204		goto retry;
205	} else if (IS_ERR(page)) {
206		err = PTR_ERR(page);
207	} else {
208		err = f2fs_add_dentry(dir, &fname, inode,
209					inode->i_ino, inode->i_mode);
210	}
211	if (err == -ENOMEM)
212		goto retry;
213	goto out;
214
215out_put:
216	f2fs_put_page(page, 0);
217out:
218	if (file_enc_name(inode))
219		name = "<encrypted>";
220	else
221		name = raw_inode->i_name;
222	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
223		    __func__, ino_of_node(ipage), name,
224		    IS_ERR(dir) ? 0 : dir->i_ino, err);
225	return err;
226}
227
228static int recover_quota_data(struct inode *inode, struct page *page)
229{
230	struct f2fs_inode *raw = F2FS_INODE(page);
231	struct iattr attr;
232	uid_t i_uid = le32_to_cpu(raw->i_uid);
233	gid_t i_gid = le32_to_cpu(raw->i_gid);
234	int err;
235
236	memset(&attr, 0, sizeof(attr));
237
238	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
239	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);
240
241	if (!uid_eq(attr.ia_uid, inode->i_uid))
242		attr.ia_valid |= ATTR_UID;
243	if (!gid_eq(attr.ia_gid, inode->i_gid))
244		attr.ia_valid |= ATTR_GID;
245
246	if (!attr.ia_valid)
247		return 0;
248
249	err = dquot_transfer(inode, &attr);
250	if (err)
251		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
252	return err;
253}
254
255static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
256{
257	if (ri->i_inline & F2FS_PIN_FILE)
258		set_inode_flag(inode, FI_PIN_FILE);
259	else
260		clear_inode_flag(inode, FI_PIN_FILE);
261	if (ri->i_inline & F2FS_DATA_EXIST)
262		set_inode_flag(inode, FI_DATA_EXIST);
263	else
264		clear_inode_flag(inode, FI_DATA_EXIST);
265}
266
267static int recover_inode(struct inode *inode, struct page *page)
268{
269	struct f2fs_inode *raw = F2FS_INODE(page);
270	char *name;
271	int err;
272
273	inode->i_mode = le16_to_cpu(raw->i_mode);
274
275	err = recover_quota_data(inode, page);
276	if (err)
277		return err;
278
279	i_uid_write(inode, le32_to_cpu(raw->i_uid));
280	i_gid_write(inode, le32_to_cpu(raw->i_gid));
281
282	if (raw->i_inline & F2FS_EXTRA_ATTR) {
283		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
284			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
285								i_projid)) {
286			projid_t i_projid;
287			kprojid_t kprojid;
288
289			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
290			kprojid = make_kprojid(&init_user_ns, i_projid);
291
292			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
293				err = f2fs_transfer_project_quota(inode,
294								kprojid);
295				if (err)
296					return err;
297				F2FS_I(inode)->i_projid = kprojid;
298			}
299		}
300	}
301
302	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
303	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
304	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
305	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
306	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
307	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
308	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
309
310	F2FS_I(inode)->i_advise = raw->i_advise;
311	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
312	f2fs_set_inode_flags(inode);
313	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
314				le16_to_cpu(raw->i_gc_failures);
315
316	recover_inline_flags(inode, raw);
317
318	f2fs_mark_inode_dirty_sync(inode, true);
319
320	if (file_enc_name(inode))
321		name = "<encrypted>";
322	else
323		name = F2FS_INODE(page)->i_name;
324
325	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
326		    ino_of_node(page), name, raw->i_inline);
327	return 0;
328}
329
330static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
331				bool check_only)
332{
333	struct curseg_info *curseg;
334	struct page *page = NULL;
335	block_t blkaddr;
336	unsigned int loop_cnt = 0;
337	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
338						valid_user_blocks(sbi);
339	int err = 0;
340
341	/* get node pages in the current segment */
342	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
343	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
344
345	while (1) {
346		struct fsync_inode_entry *entry;
347
348		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
349			return 0;
350
351		page = f2fs_get_tmp_page(sbi, blkaddr);
352		if (IS_ERR(page)) {
353			err = PTR_ERR(page);
354			break;
355		}
356
357		if (!is_recoverable_dnode(page)) {
358			f2fs_put_page(page, 1);
359			break;
360		}
361
362		if (!is_fsync_dnode(page))
363			goto next;
364
365		entry = get_fsync_inode(head, ino_of_node(page));
366		if (!entry) {
367			bool quota_inode = false;
368
369			if (!check_only &&
370					IS_INODE(page) && is_dent_dnode(page)) {
371				err = f2fs_recover_inode_page(sbi, page);
372				if (err) {
373					f2fs_put_page(page, 1);
374					break;
375				}
376				quota_inode = true;
377			}
378
379			/*
380			 * CP | dnode(F) | inode(DF)
381			 * For this case, we should not give up now.
382			 */
383			entry = add_fsync_inode(sbi, head, ino_of_node(page),
384								quota_inode);
385			if (IS_ERR(entry)) {
386				err = PTR_ERR(entry);
387				if (err == -ENOENT) {
388					err = 0;
389					goto next;
390				}
391				f2fs_put_page(page, 1);
392				break;
393			}
394		}
395		entry->blkaddr = blkaddr;
396
397		if (IS_INODE(page) && is_dent_dnode(page))
398			entry->last_dentry = blkaddr;
399next:
400		/* sanity check in order to detect looped node chain */
401		if (++loop_cnt >= free_blocks ||
402			blkaddr == next_blkaddr_of_node(page)) {
403			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
404				    __func__, blkaddr,
405				    next_blkaddr_of_node(page));
406			f2fs_put_page(page, 1);
407			err = -EINVAL;
408			break;
409		}
410
411		/* check next segment */
412		blkaddr = next_blkaddr_of_node(page);
413		f2fs_put_page(page, 1);
414
415		f2fs_ra_meta_pages_cond(sbi, blkaddr);
416	}
417	return err;
418}
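
Editor's note: the looped-node-chain check above bounds the walk by free_blocks, the number of main-area blocks not yet valid; a genuine chain written since the last checkpoint cannot be longer than that, and a node whose next pointer is its own address is certainly a cycle. A minimal userspace model of that bound follows; next_blk[] and walk() are invented for illustration.

#include <stdio.h>

#define NBLK 8

/* next_blk[i] is the "next node" pointer of block i; values >= NBLK model
 * an address outside the log area, which ends a healthy chain. The 3 -> 1
 * entry deliberately forms a cycle. */
static const unsigned next_blk[NBLK] = { 1, 2, 3, 1, 8, 8, 8, 8 };

static int walk(unsigned start, unsigned budget)
{
	unsigned blk = start, steps = 0;

	while (blk < NBLK) {
		if (++steps >= budget || next_blk[blk] == blk)
			return -1;	/* looped node chain, cf. -EINVAL above */
		blk = next_blk[blk];
	}
	return 0;			/* chain left the log area normally */
}

int main(void)
{
	printf("cycle from 0: %d\n", walk(0, NBLK));	/* -1 */
	printf("chain from 4: %d\n", walk(4, NBLK));	/*  0 */
	return 0;
}
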
419
420static void destroy_fsync_dnodes(struct list_head *head, int drop)
421{
422	struct fsync_inode_entry *entry, *tmp;
423
424	list_for_each_entry_safe(entry, tmp, head, list)
425		del_fsync_inode(entry, drop);
426}
427
428static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
429			block_t blkaddr, struct dnode_of_data *dn)
430{
431	struct seg_entry *sentry;
432	unsigned int segno = GET_SEGNO(sbi, blkaddr);
433	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
434	struct f2fs_summary_block *sum_node;
435	struct f2fs_summary sum;
436	struct page *sum_page, *node_page;
437	struct dnode_of_data tdn = *dn;
438	nid_t ino, nid;
439	struct inode *inode;
440	unsigned int offset;
441	block_t bidx;
442	int i;
443
444	sentry = get_seg_entry(sbi, segno);
445	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
446		return 0;
447
448	/* Get the previous summary */
449	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
450		struct curseg_info *curseg = CURSEG_I(sbi, i);
451		if (curseg->segno == segno) {
452			sum = curseg->sum_blk->entries[blkoff];
453			goto got_it;
454		}
455	}
456
457	sum_page = f2fs_get_sum_page(sbi, segno);
458	if (IS_ERR(sum_page))
459		return PTR_ERR(sum_page);
460	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
461	sum = sum_node->entries[blkoff];
462	f2fs_put_page(sum_page, 1);
463got_it:
464	/* Use the locked dnode page and inode */
465	nid = le32_to_cpu(sum.nid);
466	if (dn->inode->i_ino == nid) {
467		tdn.nid = nid;
468		if (!dn->inode_page_locked)
469			lock_page(dn->inode_page);
470		tdn.node_page = dn->inode_page;
471		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
472		goto truncate_out;
473	} else if (dn->nid == nid) {
474		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
475		goto truncate_out;
476	}
477
478	/* Get the node page */
479	node_page = f2fs_get_node_page(sbi, nid);
480	if (IS_ERR(node_page))
481		return PTR_ERR(node_page);
482
483	offset = ofs_of_node(node_page);
484	ino = ino_of_node(node_page);
485	f2fs_put_page(node_page, 1);
486
487	if (ino != dn->inode->i_ino) {
488		int ret;
489
490		/* Deallocate previous index in the node page */
491		inode = f2fs_iget_retry(sbi->sb, ino);
492		if (IS_ERR(inode))
493			return PTR_ERR(inode);
494
495		ret = dquot_initialize(inode);
496		if (ret) {
497			iput(inode);
498			return ret;
499		}
500	} else {
501		inode = dn->inode;
502	}
503
504	bidx = f2fs_start_bidx_of_node(offset, inode) +
505				le16_to_cpu(sum.ofs_in_node);
506
507	/*
508	 * if inode page is locked, unlock temporarily, but its reference
509	 * count keeps alive.
510	 */
511	if (ino == dn->inode->i_ino && dn->inode_page_locked)
512		unlock_page(dn->inode_page);
513
514	set_new_dnode(&tdn, inode, NULL, NULL, 0);
515	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
516		goto out;
517
518	if (tdn.data_blkaddr == blkaddr)
519		f2fs_truncate_data_blocks_range(&tdn, 1);
520
521	f2fs_put_dnode(&tdn);
522out:
523	if (ino != dn->inode->i_ino)
524		iput(inode);
525	else if (dn->inode_page_locked)
526		lock_page(dn->inode_page);
527	return 0;
528
529truncate_out:
530	if (f2fs_data_blkaddr(&tdn) == blkaddr)
531		f2fs_truncate_data_blocks_range(&tdn, 1);
532	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
533		unlock_page(dn->inode_page);
534	return 0;
535}
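
Editor's note: check_index_in_prev_nodes() leans on the segment summary, which records for every data block the node (sum.nid) and slot (sum.ofs_in_node) that reference it; that reverse mapping is what lets recovery find a stale owner and truncate its index. A toy model of the lookup, with an invented sum_blk[] table:

#include <stdio.h>

struct summary { unsigned nid, ofs_in_node; };

/* One per-segment summary: entry i describes data block i. */
static const struct summary sum_blk[4] = {
	{ 10, 0 }, { 10, 1 }, { 11, 0 }, { 11, 3 },
};

int main(void)
{
	unsigned blkoff = 2;	/* block about to be reused by recovery */
	const struct summary *s = &sum_blk[blkoff];

	/* The old owner's index (nid, slot) is what would be truncated. */
	printf("block %u was referenced by nid %u, slot %u\n",
	       blkoff, s->nid, s->ofs_in_node);
	return 0;
}
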
536
537static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
538					struct page *page)
539{
540	struct dnode_of_data dn;
541	struct node_info ni;
542	unsigned int start, end;
543	int err = 0, recovered = 0;
544
545	/* step 1: recover xattr */
546	if (IS_INODE(page)) {
547		err = f2fs_recover_inline_xattr(inode, page);
548		if (err)
549			goto out;
550	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
551		err = f2fs_recover_xattr_data(inode, page);
552		if (!err)
553			recovered++;
554		goto out;
555	}
556
557	/* step 2: recover inline data */
558	err = f2fs_recover_inline_data(inode, page);
559	if (err) {
560		if (err == 1)
561			err = 0;
562		goto out;
563	}
564
565	/* step 3: recover data indices */
566	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
567	end = start + ADDRS_PER_PAGE(page, inode);
568
569	set_new_dnode(&dn, inode, NULL, NULL, 0);
570retry_dn:
571	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
572	if (err) {
573		if (err == -ENOMEM) {
574			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
575			goto retry_dn;
576		}
577		goto out;
578	}
579
580	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
581
582	err = f2fs_get_node_info(sbi, dn.nid, &ni);
583	if (err)
584		goto err;
585
586	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
587
588	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
589		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
590			  inode->i_ino, ofs_of_node(dn.node_page),
591			  ofs_of_node(page));
592		err = -EFSCORRUPTED;
593		goto err;
594	}
595
596	for (; start < end; start++, dn.ofs_in_node++) {
597		block_t src, dest;
598
599		src = f2fs_data_blkaddr(&dn);
600		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);
601
602		if (__is_valid_data_blkaddr(src) &&
603			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
604			err = -EFSCORRUPTED;
605			goto err;
606		}
607
608		if (__is_valid_data_blkaddr(dest) &&
609			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
610			err = -EFSCORRUPTED;
611			goto err;
612		}
613
614		/* skip recovering if dest is the same as src */
615		if (src == dest)
616			continue;
617
618		/* dest is invalid, just invalidate src block */
619		if (dest == NULL_ADDR) {
620			f2fs_truncate_data_blocks_range(&dn, 1);
621			continue;
622		}
623
624		if (!file_keep_isize(inode) &&
625			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
626			f2fs_i_size_write(inode,
627				(loff_t)(start + 1) << PAGE_SHIFT);
628
629		/*
630		 * dest is reserved block, invalidate src block
631		 * and then reserve one new block in dnode page.
632		 */
633		if (dest == NEW_ADDR) {
634			f2fs_truncate_data_blocks_range(&dn, 1);
635			f2fs_reserve_new_block(&dn);
636			continue;
637		}
638
639		/* dest is valid block, try to recover from src to dest */
640		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
641
642			if (src == NULL_ADDR) {
643				err = f2fs_reserve_new_block(&dn);
644				while (err &&
645				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
646					err = f2fs_reserve_new_block(&dn);
647				/* We should not get -ENOSPC */
648				f2fs_bug_on(sbi, err);
649				if (err)
650					goto err;
651			}
652retry_prev:
653			/* Check the previous node page having this index */
654			err = check_index_in_prev_nodes(sbi, dest, &dn);
655			if (err) {
656				if (err == -ENOMEM) {
657					congestion_wait(BLK_RW_ASYNC,
658							DEFAULT_IO_TIMEOUT);
659					goto retry_prev;
660				}
661				goto err;
662			}
663
664			/* write dummy data page */
665			f2fs_replace_block(sbi, &dn, src, dest,
666						ni.version, false, false);
667			recovered++;
668		}
669	}
670
671	copy_node_footer(dn.node_page, page);
672	fill_node_footer(dn.node_page, dn.nid, ni.ino,
673					ofs_of_node(page), false);
674	set_page_dirty(dn.node_page);
675err:
676	f2fs_put_dnode(&dn);
677out:
678	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
679		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
680		    recovered, err);
681	return err;
682}
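
Editor's note: step 3 above compares, for each index, the address already on disk (src) against the address in the fsync'd node (dest), and the branches in the loop reduce to a small decision table. A hedged restatement in standalone C; NULL_BLK and NEW_BLK are stand-ins for f2fs's NULL_ADDR/NEW_ADDR, and recover_action() is an invented helper.

#include <stdio.h>

enum { NULL_BLK = 0, NEW_BLK = 1 };	/* illustrative constants only */

static const char *recover_action(unsigned src, unsigned dest)
{
	if (src == dest)
		return "skip";				/* already consistent */
	if (dest == NULL_BLK)
		return "truncate src";			/* block was deleted  */
	if (dest == NEW_BLK)
		return "truncate src, reserve new";	/* reserved, no data  */
	return "replace src with dest";			/* roll data forward  */
}

int main(void)
{
	printf("%s\n", recover_action(42, 42));		/* skip */
	printf("%s\n", recover_action(42, NULL_BLK));	/* truncate src */
	printf("%s\n", recover_action(NULL_BLK, 77));	/* replace src with dest */
	return 0;
}
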
683
684static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
685		struct list_head *tmp_inode_list, struct list_head *dir_list)
686{
687	struct curseg_info *curseg;
688	struct page *page = NULL;
689	int err = 0;
690	block_t blkaddr;
691
692	/* get node pages in the current segment */
693	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
694	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
695
696	while (1) {
697		struct fsync_inode_entry *entry;
698
699		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
700			break;
701
702		f2fs_ra_meta_pages_cond(sbi, blkaddr);
703
704		page = f2fs_get_tmp_page(sbi, blkaddr);
705		if (IS_ERR(page)) {
706			err = PTR_ERR(page);
707			break;
708		}
709
710		if (!is_recoverable_dnode(page)) {
711			f2fs_put_page(page, 1);
712			break;
713		}
714
715		entry = get_fsync_inode(inode_list, ino_of_node(page));
716		if (!entry)
717			goto next;
718		/*
719		 * inode(x) | CP | inode(x) | dnode(F)
720		 * In this case, we can lose the latest inode(x).
721		 * So, call recover_inode for the inode update.
722		 */
723		if (IS_INODE(page)) {
724			err = recover_inode(entry->inode, page);
725			if (err) {
726				f2fs_put_page(page, 1);
727				break;
728			}
729		}
730		if (entry->last_dentry == blkaddr) {
731			err = recover_dentry(entry->inode, page, dir_list);
732			if (err) {
733				f2fs_put_page(page, 1);
734				break;
735			}
736		}
737		err = do_recover_data(sbi, entry->inode, page);
738		if (err) {
739			f2fs_put_page(page, 1);
740			break;
741		}
742
743		if (entry->blkaddr == blkaddr)
744			list_move_tail(&entry->list, tmp_inode_list);
745next:
746		/* check next segment */
747		blkaddr = next_blkaddr_of_node(page);
748		f2fs_put_page(page, 1);
749	}
750	if (!err)
751		f2fs_allocate_new_segments(sbi);
752	return err;
753}
754
755int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
756{
757	struct list_head inode_list, tmp_inode_list;
758	struct list_head dir_list;
759	int err;
760	int ret = 0;
761	unsigned long s_flags = sbi->sb->s_flags;
762	bool need_writecp = false;
763	bool fix_curseg_write_pointer = false;
764#ifdef CONFIG_QUOTA
765	int quota_enabled;
766#endif
767
768	if (s_flags & SB_RDONLY) {
769		f2fs_info(sbi, "recover fsync data on readonly fs");
770		sbi->sb->s_flags &= ~SB_RDONLY;
771	}
772
773#ifdef CONFIG_QUOTA
774	/* Needed for iput() to work correctly and not trash data */
775	sbi->sb->s_flags |= SB_ACTIVE;
776	/* Turn on quotas so that they are updated correctly */
777	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
778#endif
779
780	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
781			sizeof(struct fsync_inode_entry));
782	if (!fsync_entry_slab) {
783		err = -ENOMEM;
784		goto out;
785	}
786
787	INIT_LIST_HEAD(&inode_list);
788	INIT_LIST_HEAD(&tmp_inode_list);
789	INIT_LIST_HEAD(&dir_list);
790
791	/* prevent checkpoint */
792	mutex_lock(&sbi->cp_mutex);
793
794	/* step #1: find fsynced inode numbers */
795	err = find_fsync_dnodes(sbi, &inode_list, check_only);
796	if (err || list_empty(&inode_list))
797		goto skip;
798
799	if (check_only) {
800		ret = 1;
801		goto skip;
802	}
803
804	need_writecp = true;
805
806	/* step #2: recover data */
807	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
808	if (!err)
809		f2fs_bug_on(sbi, !list_empty(&inode_list));
810	else {
811		/* restore s_flags to let iput() trash data */
812		sbi->sb->s_flags = s_flags;
813	}
814skip:
815	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
816
817	destroy_fsync_dnodes(&inode_list, err);
818	destroy_fsync_dnodes(&tmp_inode_list, err);
819
820	/* truncate meta pages to be used by the recovery */
821	truncate_inode_pages_range(META_MAPPING(sbi),
822			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
823
824	if (err) {
825		truncate_inode_pages_final(NODE_MAPPING(sbi));
826		truncate_inode_pages_final(META_MAPPING(sbi));
827	}
828
829	/*
830	 * If fsync data succeeds or there is no fsync data to recover,
831	 * and the f2fs is not read only, check and fix zoned block devices'
832	 * write pointer consistency.
833	 */
834	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
835			f2fs_sb_has_blkzoned(sbi)) {
836		err = f2fs_fix_curseg_write_pointer(sbi);
837		ret = err;
838	}
839
840	if (!err)
841		clear_sbi_flag(sbi, SBI_POR_DOING);
842
843	mutex_unlock(&sbi->cp_mutex);
844
845	/* let's drop all the directory inodes for clean checkpoint */
846	destroy_fsync_dnodes(&dir_list, err);
847
848	if (need_writecp) {
849		set_sbi_flag(sbi, SBI_IS_RECOVERED);
850
851		if (!err) {
852			struct cp_control cpc = {
853				.reason = CP_RECOVERY,
854			};
855			err = f2fs_write_checkpoint(sbi, &cpc);
856		}
857	}
858
859	kmem_cache_destroy(fsync_entry_slab);
860out:
861#ifdef CONFIG_QUOTA
862	/* Turn quotas off */
863	if (quota_enabled)
864		f2fs_quota_off_umount(sbi->sb);
865#endif
866	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
867
868	return ret ? ret: err;
869}
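
Editor's note: f2fs_recover_fsync_data() thus makes two passes over the same warm-node chain: find_fsync_dnodes() only collects candidate inodes, recover_data() replays their blocks, and a CP_RECOVERY checkpoint makes the result durable; with check_only set, it stops after pass one and returns 1 if anything would need recovery. A skeletal userspace sketch of that control flow, where all three helpers are stubs rather than kernel calls:

#include <stdio.h>

static int find_dnodes(int *found) { *found = 1; return 0; }	/* pass 1 stub */
static int replay(void) { return 0; }				/* pass 2 stub */
static int checkpoint(void) { return 0; }			/* CP_RECOVERY stub */

static int recover(int check_only)
{
	int found = 0;
	int err = find_dnodes(&found);	/* pass 1: collect fsync'd inodes */

	if (err || !found)
		return err;
	if (check_only)
		return 1;		/* caller learns recovery is needed */
	err = replay();			/* pass 2: roll data forward */
	return err ? err : checkpoint();
}

int main(void)
{
	printf("check_only -> %d\n", recover(1));	/* 1: data to recover */
	printf("full run   -> %d\n", recover(0));	/* 0: success */
	return 0;
}
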
v4.17
 
  1/*
  2 * fs/f2fs/recovery.c
  3 *
  4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  5 *             http://www.samsung.com/
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#include <linux/fs.h>
 12#include <linux/f2fs_fs.h>
 13#include "f2fs.h"
 14#include "node.h"
 15#include "segment.h"
 16
 17/*
 18 * Roll forward recovery scenarios.
 19 *
 20 * [Term] F: fsync_mark, D: dentry_mark
 21 *
 22 * 1. inode(x) | CP | inode(x) | dnode(F)
 23 * -> Update the latest inode(x).
 24 *
 25 * 2. inode(x) | CP | inode(F) | dnode(F)
 26 * -> No problem.
 27 *
 28 * 3. inode(x) | CP | dnode(F) | inode(x)
 29 * -> Recover to the latest dnode(F), and drop the last inode(x)
 30 *
 31 * 4. inode(x) | CP | dnode(F) | inode(F)
 32 * -> No problem.
 33 *
 34 * 5. CP | inode(x) | dnode(F)
 35 * -> The inode(DF) was missing. Should drop this dnode(F).
 36 *
 37 * 6. CP | inode(DF) | dnode(F)
 38 * -> No problem.
 39 *
 40 * 7. CP | dnode(F) | inode(DF)
 41 * -> If f2fs_iget fails, then goto next to find inode(DF).
 42 *
 43 * 8. CP | dnode(F) | inode(x)
 44 * -> If f2fs_iget fails, then goto next to find inode(DF).
 45 *    But it will fail due to no inode(DF).
 46 */
 47
 48static struct kmem_cache *fsync_entry_slab;
 49
 50bool space_for_roll_forward(struct f2fs_sb_info *sbi)
 51{
 52	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
 53
 54	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
 55		return false;
 56	return true;
 57}
 58
 59static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
 60								nid_t ino)
 61{
 62	struct fsync_inode_entry *entry;
 63
 64	list_for_each_entry(entry, head, list)
 65		if (entry->inode->i_ino == ino)
 66			return entry;
 67
 68	return NULL;
 69}
 70
 71static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
 72			struct list_head *head, nid_t ino, bool quota_inode)
 73{
 74	struct inode *inode;
 75	struct fsync_inode_entry *entry;
 76	int err;
 77
 78	inode = f2fs_iget_retry(sbi->sb, ino);
 79	if (IS_ERR(inode))
 80		return ERR_CAST(inode);
 81
 82	err = dquot_initialize(inode);
 83	if (err)
 84		goto err_out;
 85
 86	if (quota_inode) {
 87		err = dquot_alloc_inode(inode);
 88		if (err)
 89			goto err_out;
 90	}
 91
 92	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
 93	entry->inode = inode;
 94	list_add_tail(&entry->list, head);
 95
 96	return entry;
 97err_out:
 98	iput(inode);
 99	return ERR_PTR(err);
100}
101
102static void del_fsync_inode(struct fsync_inode_entry *entry)
103{
104	iput(entry->inode);
105	list_del(&entry->list);
106	kmem_cache_free(fsync_entry_slab, entry);
107}
108
109static int recover_dentry(struct inode *inode, struct page *ipage,
110						struct list_head *dir_list)
111{
112	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
113	nid_t pino = le32_to_cpu(raw_inode->i_pino);
114	struct f2fs_dir_entry *de;
115	struct fscrypt_name fname;
116	struct page *page;
117	struct inode *dir, *einode;
118	struct fsync_inode_entry *entry;
119	int err = 0;
120	char *name;
121
122	entry = get_fsync_inode(dir_list, pino);
123	if (!entry) {
124		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
125							pino, false);
126		if (IS_ERR(entry)) {
127			dir = ERR_CAST(entry);
128			err = PTR_ERR(entry);
129			goto out;
130		}
131	}
132
133	dir = entry->inode;
134
135	memset(&fname, 0, sizeof(struct fscrypt_name));
136	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
137	fname.disk_name.name = raw_inode->i_name;
138
139	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
140		WARN_ON(1);
141		err = -ENAMETOOLONG;
142		goto out;
143	}
144retry:
145	de = __f2fs_find_entry(dir, &fname, &page);
146	if (de && inode->i_ino == le32_to_cpu(de->ino))
147		goto out_put;
148
149	if (de) {
150		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
151		if (IS_ERR(einode)) {
152			WARN_ON(1);
153			err = PTR_ERR(einode);
154			if (err == -ENOENT)
155				err = -EEXIST;
156			goto out_put;
157		}
158
159		err = dquot_initialize(einode);
160		if (err) {
161			iput(einode);
162			goto out_put;
163		}
164
165		err = acquire_orphan_inode(F2FS_I_SB(inode));
166		if (err) {
167			iput(einode);
168			goto out_put;
169		}
170		f2fs_delete_entry(de, page, dir, einode);
171		iput(einode);
172		goto retry;
173	} else if (IS_ERR(page)) {
174		err = PTR_ERR(page);
175	} else {
176		err = __f2fs_do_add_link(dir, &fname, inode,
177					inode->i_ino, inode->i_mode);
178	}
179	if (err == -ENOMEM)
180		goto retry;
181	goto out;
182
183out_put:
184	f2fs_put_page(page, 0);
185out:
186	if (file_enc_name(inode))
187		name = "<encrypted>";
188	else
189		name = raw_inode->i_name;
190	f2fs_msg(inode->i_sb, KERN_NOTICE,
191			"%s: ino = %x, name = %s, dir = %lx, err = %d",
192			__func__, ino_of_node(ipage), name,
193			IS_ERR(dir) ? 0 : dir->i_ino, err);
194	return err;
195}
196
197static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
198{
199	if (ri->i_inline & F2FS_PIN_FILE)
200		set_inode_flag(inode, FI_PIN_FILE);
201	else
202		clear_inode_flag(inode, FI_PIN_FILE);
203	if (ri->i_inline & F2FS_DATA_EXIST)
204		set_inode_flag(inode, FI_DATA_EXIST);
205	else
206		clear_inode_flag(inode, FI_DATA_EXIST);
207	if (!(ri->i_inline & F2FS_INLINE_DOTS))
208		clear_inode_flag(inode, FI_INLINE_DOTS);
209}
210
211static void recover_inode(struct inode *inode, struct page *page)
212{
213	struct f2fs_inode *raw = F2FS_INODE(page);
214	char *name;
215
216	inode->i_mode = le16_to_cpu(raw->i_mode);
217	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
218	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
219	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
220	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
221	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
222	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
223	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
224
225	F2FS_I(inode)->i_advise = raw->i_advise;
226
227	recover_inline_flags(inode, raw);
228
229	if (file_enc_name(inode))
230		name = "<encrypted>";
231	else
232		name = F2FS_INODE(page)->i_name;
233
234	f2fs_msg(inode->i_sb, KERN_NOTICE,
235		"recover_inode: ino = %x, name = %s, inline = %x",
236			ino_of_node(page), name, raw->i_inline);
237}
238
239static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
240				bool check_only)
241{
242	struct curseg_info *curseg;
243	struct page *page = NULL;
244	block_t blkaddr;
245	unsigned int loop_cnt = 0;
246	unsigned int free_blocks = sbi->user_block_count -
247					valid_user_blocks(sbi);
248	int err = 0;
249
250	/* get node pages in the current segment */
251	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
252	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
253
254	while (1) {
255		struct fsync_inode_entry *entry;
256
257		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
258			return 0;
259
260		page = get_tmp_page(sbi, blkaddr);
261
262		if (!is_recoverable_dnode(page))
263			break;
264
265		if (!is_fsync_dnode(page))
266			goto next;
267
268		entry = get_fsync_inode(head, ino_of_node(page));
269		if (!entry) {
270			bool quota_inode = false;
271
272			if (!check_only &&
273					IS_INODE(page) && is_dent_dnode(page)) {
274				err = recover_inode_page(sbi, page);
275				if (err)
276					break;
277				quota_inode = true;
278			}
279
280			/*
281			 * CP | dnode(F) | inode(DF)
282			 * For this case, we should not give up now.
283			 */
284			entry = add_fsync_inode(sbi, head, ino_of_node(page),
285								quota_inode);
286			if (IS_ERR(entry)) {
287				err = PTR_ERR(entry);
288				if (err == -ENOENT) {
289					err = 0;
290					goto next;
291				}
292				break;
293			}
294		}
295		entry->blkaddr = blkaddr;
296
297		if (IS_INODE(page) && is_dent_dnode(page))
298			entry->last_dentry = blkaddr;
299next:
300		/* sanity check in order to detect looped node chain */
301		if (++loop_cnt >= free_blocks ||
302			blkaddr == next_blkaddr_of_node(page)) {
303			f2fs_msg(sbi->sb, KERN_NOTICE,
304				"%s: detect looped node chain, "
305				"blkaddr:%u, next:%u",
306				__func__, blkaddr, next_blkaddr_of_node(page));
307			err = -EINVAL;
308			break;
309		}
310
311		/* check next segment */
312		blkaddr = next_blkaddr_of_node(page);
313		f2fs_put_page(page, 1);
314
315		ra_meta_pages_cond(sbi, blkaddr);
316	}
317	f2fs_put_page(page, 1);
318	return err;
319}
320
321static void destroy_fsync_dnodes(struct list_head *head)
322{
323	struct fsync_inode_entry *entry, *tmp;
324
325	list_for_each_entry_safe(entry, tmp, head, list)
326		del_fsync_inode(entry);
327}
328
329static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
330			block_t blkaddr, struct dnode_of_data *dn)
331{
332	struct seg_entry *sentry;
333	unsigned int segno = GET_SEGNO(sbi, blkaddr);
334	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
335	struct f2fs_summary_block *sum_node;
336	struct f2fs_summary sum;
337	struct page *sum_page, *node_page;
338	struct dnode_of_data tdn = *dn;
339	nid_t ino, nid;
340	struct inode *inode;
341	unsigned int offset;
342	block_t bidx;
343	int i;
344
345	sentry = get_seg_entry(sbi, segno);
346	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
347		return 0;
348
349	/* Get the previous summary */
350	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
351		struct curseg_info *curseg = CURSEG_I(sbi, i);
352		if (curseg->segno == segno) {
353			sum = curseg->sum_blk->entries[blkoff];
354			goto got_it;
355		}
356	}
357
358	sum_page = get_sum_page(sbi, segno);
359	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
360	sum = sum_node->entries[blkoff];
361	f2fs_put_page(sum_page, 1);
362got_it:
363	/* Use the locked dnode page and inode */
364	nid = le32_to_cpu(sum.nid);
365	if (dn->inode->i_ino == nid) {
366		tdn.nid = nid;
367		if (!dn->inode_page_locked)
368			lock_page(dn->inode_page);
369		tdn.node_page = dn->inode_page;
370		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
371		goto truncate_out;
372	} else if (dn->nid == nid) {
373		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
374		goto truncate_out;
375	}
376
377	/* Get the node page */
378	node_page = get_node_page(sbi, nid);
379	if (IS_ERR(node_page))
380		return PTR_ERR(node_page);
381
382	offset = ofs_of_node(node_page);
383	ino = ino_of_node(node_page);
384	f2fs_put_page(node_page, 1);
385
386	if (ino != dn->inode->i_ino) {
387		int ret;
388
389		/* Deallocate previous index in the node page */
390		inode = f2fs_iget_retry(sbi->sb, ino);
391		if (IS_ERR(inode))
392			return PTR_ERR(inode);
393
394		ret = dquot_initialize(inode);
395		if (ret) {
396			iput(inode);
397			return ret;
398		}
399	} else {
400		inode = dn->inode;
401	}
402
403	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
404
405	/*
406	 * if inode page is locked, unlock temporarily, but its reference
407	 * count keeps alive.
408	 */
409	if (ino == dn->inode->i_ino && dn->inode_page_locked)
410		unlock_page(dn->inode_page);
411
412	set_new_dnode(&tdn, inode, NULL, NULL, 0);
413	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
414		goto out;
415
416	if (tdn.data_blkaddr == blkaddr)
417		truncate_data_blocks_range(&tdn, 1);
418
419	f2fs_put_dnode(&tdn);
420out:
421	if (ino != dn->inode->i_ino)
422		iput(inode);
423	else if (dn->inode_page_locked)
424		lock_page(dn->inode_page);
425	return 0;
426
427truncate_out:
428	if (datablock_addr(tdn.inode, tdn.node_page,
429					tdn.ofs_in_node) == blkaddr)
430		truncate_data_blocks_range(&tdn, 1);
431	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
432		unlock_page(dn->inode_page);
433	return 0;
434}
435
436static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
437					struct page *page)
438{
439	struct dnode_of_data dn;
440	struct node_info ni;
441	unsigned int start, end;
442	int err = 0, recovered = 0;
443
444	/* step 1: recover xattr */
445	if (IS_INODE(page)) {
446		recover_inline_xattr(inode, page);
447	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
448		err = recover_xattr_data(inode, page);
449		if (!err)
450			recovered++;
451		goto out;
452	}
453
454	/* step 2: recover inline data */
455	if (recover_inline_data(inode, page))
456		goto out;
457
458	/* step 3: recover data indices */
459	start = start_bidx_of_node(ofs_of_node(page), inode);
460	end = start + ADDRS_PER_PAGE(page, inode);
461
462	set_new_dnode(&dn, inode, NULL, NULL, 0);
463retry_dn:
464	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
465	if (err) {
466		if (err == -ENOMEM) {
467			congestion_wait(BLK_RW_ASYNC, HZ/50);
468			goto retry_dn;
469		}
470		goto out;
471	}
472
473	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
474
475	get_node_info(sbi, dn.nid, &ni);
476	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
477	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
478
479	for (; start < end; start++, dn.ofs_in_node++) {
480		block_t src, dest;
481
482		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
483		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
484
485		/* skip recovering if dest is the same as src */
486		if (src == dest)
487			continue;
488
489		/* dest is invalid, just invalidate src block */
490		if (dest == NULL_ADDR) {
491			truncate_data_blocks_range(&dn, 1);
492			continue;
493		}
494
495		if (!file_keep_isize(inode) &&
496			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
497			f2fs_i_size_write(inode,
498				(loff_t)(start + 1) << PAGE_SHIFT);
499
500		/*
501		 * dest is reserved block, invalidate src block
502		 * and then reserve one new block in dnode page.
503		 */
504		if (dest == NEW_ADDR) {
505			truncate_data_blocks_range(&dn, 1);
506			reserve_new_block(&dn);
507			continue;
508		}
509
510		/* dest is valid block, try to recover from src to dest */
511		if (is_valid_blkaddr(sbi, dest, META_POR)) {
512
513			if (src == NULL_ADDR) {
514				err = reserve_new_block(&dn);
515#ifdef CONFIG_F2FS_FAULT_INJECTION
516				while (err)
517					err = reserve_new_block(&dn);
518#endif
519				/* We should not get -ENOSPC */
520				f2fs_bug_on(sbi, err);
521				if (err)
522					goto err;
523			}
524retry_prev:
525			/* Check the previous node page having this index */
526			err = check_index_in_prev_nodes(sbi, dest, &dn);
527			if (err) {
528				if (err == -ENOMEM) {
529					congestion_wait(BLK_RW_ASYNC, HZ/50);
530					goto retry_prev;
531				}
532				goto err;
533			}
534
535			/* write dummy data page */
536			f2fs_replace_block(sbi, &dn, src, dest,
537						ni.version, false, false);
538			recovered++;
539		}
540	}
541
542	copy_node_footer(dn.node_page, page);
543	fill_node_footer(dn.node_page, dn.nid, ni.ino,
544					ofs_of_node(page), false);
545	set_page_dirty(dn.node_page);
546err:
547	f2fs_put_dnode(&dn);
548out:
549	f2fs_msg(sbi->sb, KERN_NOTICE,
550		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
551		inode->i_ino,
552		file_keep_isize(inode) ? "keep" : "recover",
553		recovered, err);
554	return err;
555}
556
557static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
558						struct list_head *dir_list)
559{
560	struct curseg_info *curseg;
561	struct page *page = NULL;
562	int err = 0;
563	block_t blkaddr;
564
565	/* get node pages in the current segment */
566	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
567	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
568
569	while (1) {
570		struct fsync_inode_entry *entry;
571
572		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
573			break;
574
575		ra_meta_pages_cond(sbi, blkaddr);
576
577		page = get_tmp_page(sbi, blkaddr);
578
579		if (!is_recoverable_dnode(page)) {
580			f2fs_put_page(page, 1);
581			break;
582		}
583
584		entry = get_fsync_inode(inode_list, ino_of_node(page));
585		if (!entry)
586			goto next;
587		/*
588		 * inode(x) | CP | inode(x) | dnode(F)
589		 * In this case, we can lose the latest inode(x).
590		 * So, call recover_inode for the inode update.
591		 */
592		if (IS_INODE(page))
593			recover_inode(entry->inode, page);
594		if (entry->last_dentry == blkaddr) {
595			err = recover_dentry(entry->inode, page, dir_list);
596			if (err) {
597				f2fs_put_page(page, 1);
598				break;
599			}
600		}
601		err = do_recover_data(sbi, entry->inode, page);
602		if (err) {
603			f2fs_put_page(page, 1);
604			break;
605		}
606
607		if (entry->blkaddr == blkaddr)
608			del_fsync_inode(entry);
609next:
610		/* check next segment */
611		blkaddr = next_blkaddr_of_node(page);
612		f2fs_put_page(page, 1);
613	}
614	if (!err)
615		allocate_new_segments(sbi);
616	return err;
617}
618
619int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
620{
621	struct list_head inode_list;
622	struct list_head dir_list;
623	int err;
624	int ret = 0;
625	unsigned long s_flags = sbi->sb->s_flags;
626	bool need_writecp = false;
627#ifdef CONFIG_QUOTA
628	int quota_enabled;
629#endif
630
631	if (s_flags & SB_RDONLY) {
632		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
633		sbi->sb->s_flags &= ~SB_RDONLY;
634	}
635
636#ifdef CONFIG_QUOTA
637	/* Needed for iput() to work correctly and not trash data */
638	sbi->sb->s_flags |= SB_ACTIVE;
639	/* Turn on quotas so that they are updated correctly */
640	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
641#endif
642
643	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
644			sizeof(struct fsync_inode_entry));
645	if (!fsync_entry_slab) {
646		err = -ENOMEM;
647		goto out;
648	}
649
650	INIT_LIST_HEAD(&inode_list);
651	INIT_LIST_HEAD(&dir_list);
652
653	/* prevent checkpoint */
654	mutex_lock(&sbi->cp_mutex);
655
656	/* step #1: find fsynced inode numbers */
657	err = find_fsync_dnodes(sbi, &inode_list, check_only);
658	if (err || list_empty(&inode_list))
659		goto skip;
660
661	if (check_only) {
662		ret = 1;
663		goto skip;
664	}
665
666	need_writecp = true;
667
668	/* step #2: recover data */
669	err = recover_data(sbi, &inode_list, &dir_list);
670	if (!err)
671		f2fs_bug_on(sbi, !list_empty(&inode_list));
672skip:
673	destroy_fsync_dnodes(&inode_list);
674
675	/* truncate meta pages to be used by the recovery */
676	truncate_inode_pages_range(META_MAPPING(sbi),
677			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
678
679	if (err) {
680		truncate_inode_pages_final(NODE_MAPPING(sbi));
681		truncate_inode_pages_final(META_MAPPING(sbi));
682	}
683
684	clear_sbi_flag(sbi, SBI_POR_DOING);
685	mutex_unlock(&sbi->cp_mutex);
686
687	/* let's drop all the directory inodes for clean checkpoint */
688	destroy_fsync_dnodes(&dir_list);
689
690	if (!err && need_writecp) {
691		struct cp_control cpc = {
692			.reason = CP_RECOVERY,
693		};
694		err = write_checkpoint(sbi, &cpc);
695	}
696
697	kmem_cache_destroy(fsync_entry_slab);
698out:
699#ifdef CONFIG_QUOTA
700	/* Turn quotas off */
701	if (quota_enabled)
702		f2fs_quota_off_umount(sbi->sb);
703#endif
704	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
705
706	return ret ? ret: err;
707}