v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * fs/f2fs/segment.h
  4 *
  5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  6 *             http://www.samsung.com/
  7 */
  8#include <linux/blkdev.h>
  9#include <linux/backing-dev.h>
 10
 11/* constant macro */
 12#define NULL_SEGNO			((unsigned int)(~0))
 13#define NULL_SECNO			((unsigned int)(~0))
 14
 15#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
 16#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
 17
 18#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
 19
 20/* L: Logical segment # in volume, R: Relative segment # in main area */
 21#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
 22#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
 23
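/*
 * A worked sketch of the L2R/R2L mapping above (values invented for
 * illustration): if the main area starts at logical segment 512
 * (free_i->start_segno == 512), logical segment 520 is relative
 * segment 8, and the two macros invert each other:
 *
 *	GET_L2R_SEGNO(free_i, 520) == 8
 *	GET_R2L_SEGNO(free_i, 8) == 520
 */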
 24#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
 25#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)
 26
 27#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
 28#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
 29#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
 30
 31#define IS_CURSEG(sbi, seg)						\
 32	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
 33	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
 34	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
 35	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
 36	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
 37	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
 38
 39#define IS_CURSEC(sbi, secno)						\
 40	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
 41	  (sbi)->segs_per_sec) ||	\
 42	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
 43	  (sbi)->segs_per_sec) ||	\
 44	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
 45	  (sbi)->segs_per_sec) ||	\
 46	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
 47	  (sbi)->segs_per_sec) ||	\
 48	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
 49	  (sbi)->segs_per_sec) ||	\
 50	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
 51	  (sbi)->segs_per_sec))	\
 52
 53#define MAIN_BLKADDR(sbi)						\
 54	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : 				\
 55		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
 56#define SEG0_BLKADDR(sbi)						\
 57	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : 				\
 58		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
 59
 60#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
 61#define MAIN_SECS(sbi)	((sbi)->total_sections)
 62
 63#define TOTAL_SEGS(sbi)							\
 64	(SM_I(sbi) ? SM_I(sbi)->segment_count : 				\
 65		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
 66#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
 67
 68#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
 69#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
 70					(sbi)->log_blocks_per_seg))
 71
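/*
 * Worked numbers for SEGMENT_SIZE (typical, assumed format values):
 * with 4KB blocks (log_blocksize == 12) and 512 blocks per segment
 * (log_blocks_per_seg == 9), SEGMENT_SIZE(sbi) == 1ULL << 21 == 2MB,
 * the usual f2fs segment size.
 */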
 72#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
 73	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
 74
 75#define NEXT_FREE_BLKADDR(sbi, curseg)					\
 76	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
 77
 78#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
 79#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
 80	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
 81#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
 82	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 83
 84#define GET_SEGNO(sbi, blk_addr)					\
 85	((!__is_valid_data_blkaddr(blk_addr)) ?			\
 86	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
 87		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
 88#define BLKS_PER_SEC(sbi)					\
 89	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
 90#define GET_SEC_FROM_SEG(sbi, segno)				\
 91	((segno) / (sbi)->segs_per_sec)
 92#define GET_SEG_FROM_SEC(sbi, secno)				\
 93	((secno) * (sbi)->segs_per_sec)
 94#define GET_ZONE_FROM_SEC(sbi, secno)				\
 95	((secno) / (sbi)->secs_per_zone)
 96#define GET_ZONE_FROM_SEG(sbi, segno)				\
 97	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
 98
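/*
 * Sketch of the seg/sec/zone arithmetic above, assuming a layout with
 * segs_per_sec == 4 and secs_per_zone == 2: segment 10 lies in section
 * 10 / 4 == 2, which lies in zone 2 / 2 == 1, so
 * GET_ZONE_FROM_SEG(sbi, 10) == 1. With segs_per_sec == 1 (the common
 * non-zoned case) every segment is its own section.
 */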
 99#define GET_SUM_BLOCK(sbi, segno)				\
100	((sbi)->sm_info->ssa_blkaddr + (segno))
101
102#define GET_SUM_TYPE(footer) ((footer)->entry_type)
103#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
104
105#define SIT_ENTRY_OFFSET(sit_i, segno)					\
106	((segno) % (sit_i)->sents_per_block)
107#define SIT_BLOCK_OFFSET(segno)					\
108	((segno) / SIT_ENTRY_PER_BLOCK)
109#define	START_SEGNO(segno)		\
110	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
111#define SIT_BLK_CNT(sbi)			\
112	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
113#define f2fs_bitmap_size(nr)			\
114	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
115
116#define SECTOR_FROM_BLOCK(blk_addr)					\
117	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
118#define SECTOR_TO_BLOCK(sectors)					\
119	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
120
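/*
 * SECTOR_FROM_BLOCK/SECTOR_TO_BLOCK convert between filesystem blocks
 * and 512-byte device sectors. Assuming 4KB blocks, i.e.
 * F2FS_LOG_SECTORS_PER_BLOCK == 3: block 100 starts at sector
 * 100 << 3 == 800, and SECTOR_TO_BLOCK(800) == 100.
 */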
121/*
122 * Indicates the block allocation direction: RIGHT or LEFT.
123 * RIGHT means allocating new sections towards the end of the volume.
124 * LEFT means the opposite direction.
125 */
126enum {
127	ALLOC_RIGHT = 0,
128	ALLOC_LEFT
129};
130
131/*
132 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
133 * LFS writes data sequentially with cleaning operations.
134 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
135 */
136enum {
137	LFS = 0,
138	SSR
139};
140
141/*
142 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
143 * GC_CB is based on cost-benefit algorithm.
144 * GC_GREEDY is based on greedy algorithm.
145 */
146enum {
147	GC_CB = 0,
148	GC_GREEDY,
149	ALLOC_NEXT,
150	FLUSH_DEVICE,
151	MAX_GC_POLICY,
152};
153
154/*
155 * BG_GC means the background cleaning job.
156 * FG_GC means the on-demand cleaning job.
157 * FORCE_FG_GC means an on-demand cleaning job triggered from the background.
158 */
159enum {
160	BG_GC = 0,
161	FG_GC,
162	FORCE_FG_GC,
163};
164
165/* for a function parameter to select a victim segment */
166struct victim_sel_policy {
167	int alloc_mode;			/* LFS or SSR */
168	int gc_mode;			/* GC_CB or GC_GREEDY */
169	unsigned long *dirty_segmap;	/* dirty segment bitmap */
170	unsigned int max_search;	/* maximum # of segments to search */
171	unsigned int offset;		/* last scanned bitmap offset */
172	unsigned int ofs_unit;		/* bitmap search unit */
173	unsigned int min_cost;		/* minimum cost */
174	unsigned int min_segno;		/* segment # having min. cost */
175};
176
177struct seg_entry {
178	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
179	unsigned int valid_blocks:10;	/* # of valid blocks */
180	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
181	unsigned int padding:6;		/* padding */
182	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
183#ifdef CONFIG_F2FS_CHECK_FS
184	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
185#endif
186	/*
187	 * # of valid blocks and the validity bitmap stored in the last
188	 * checkpoint pack. This information is used by the SSR mode.
189	 */
190	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
191	unsigned char *discard_map;
192	unsigned long long mtime;	/* modification time of the segment */
193};
194
195struct sec_entry {
196	unsigned int valid_blocks;	/* # of valid blocks in a section */
197};
198
199struct segment_allocation {
200	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
201};
202
203/*
204 * These values are stored in a page's private data to indicate that
205 * the page was written atomically and is on the inmem_pages list.
206 */
207#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
208#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)
209
210#define IS_ATOMIC_WRITTEN_PAGE(page)			\
211		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
212#define IS_DUMMY_WRITTEN_PAGE(page)			\
213		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
214
215#define MAX_SKIP_GC_COUNT			16
216
217struct inmem_pages {
218	struct list_head list;
219	struct page *page;
220	block_t old_addr;		/* for revoking when fail to commit */
221};
222
223struct sit_info {
224	const struct segment_allocation *s_ops;
225
226	block_t sit_base_addr;		/* start block address of SIT area */
227	block_t sit_blocks;		/* # of blocks used by SIT area */
228	block_t written_valid_blocks;	/* # of valid blocks in main area */
229	char *bitmap;			/* all bitmaps pointer */
230	char *sit_bitmap;		/* SIT bitmap pointer */
231#ifdef CONFIG_F2FS_CHECK_FS
232	char *sit_bitmap_mir;		/* SIT bitmap mirror */
233
234	/* bitmap of segments to be ignored by GC in case of errors */
235	unsigned long *invalid_segmap;
236#endif
237	unsigned int bitmap_size;	/* SIT bitmap size */
238
239	unsigned long *tmp_map;			/* bitmap for temporary use */
240	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
241	unsigned int dirty_sentries;		/* # of dirty sentries */
242	unsigned int sents_per_block;		/* # of SIT entries per block */
243	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
244	struct seg_entry *sentries;		/* SIT segment-level cache */
245	struct sec_entry *sec_entries;		/* SIT section-level cache */
246
247	/* for cost-benefit algorithm in cleaning procedure */
248	unsigned long long elapsed_time;	/* elapsed time after mount */
249	unsigned long long mounted_time;	/* mount time */
250	unsigned long long min_mtime;		/* min. modification time */
251	unsigned long long max_mtime;		/* max. modification time */
252
253	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
254};
255
256struct free_segmap_info {
257	unsigned int start_segno;	/* start segment number logically */
258	unsigned int free_segments;	/* # of free segments */
259	unsigned int free_sections;	/* # of free sections */
260	spinlock_t segmap_lock;		/* free segmap lock */
261	unsigned long *free_segmap;	/* free segment bitmap */
262	unsigned long *free_secmap;	/* free section bitmap */
263};
264
265/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
266enum dirty_type {
267	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
268	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
269	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
270	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
271	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
272	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
273	DIRTY,			/* to count # of dirty segments */
274	PRE,			/* to count # of entirely obsolete segments */
275	NR_DIRTY_TYPE
276};
277
278struct dirty_seglist_info {
279	const struct victim_selection *v_ops;	/* victim selection operation */
280	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
281	struct mutex seglist_lock;		/* lock for segment bitmaps */
282	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
283	unsigned long *victim_secmap;		/* background GC victims */
284};
285
286/* victim selection function for cleaning and SSR */
287struct victim_selection {
288	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
289							int, int, char);
290};
291
292/* for active log information */
293struct curseg_info {
294	struct mutex curseg_mutex;		/* lock for consistency */
295	struct f2fs_summary_block *sum_blk;	/* cached summary block */
296	struct rw_semaphore journal_rwsem;	/* protect journal area */
297	struct f2fs_journal *journal;		/* cached journal info */
298	unsigned char alloc_type;		/* current allocation type */
299	unsigned int segno;			/* current segment number */
300	unsigned short next_blkoff;		/* next block offset to write */
301	unsigned int zone;			/* current zone number */
302	unsigned int next_segno;		/* preallocated segment */
303};
304
305struct sit_entry_set {
306	struct list_head set_list;	/* link with all sit sets */
307	unsigned int start_segno;	/* start segno of sits in set */
308	unsigned int entry_cnt;		/* the # of sit entries in set */
309};
310
311/*
312 * inline functions
313 */
314static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
315{
316	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
317}
318
319static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
320						unsigned int segno)
321{
322	struct sit_info *sit_i = SIT_I(sbi);
323	return &sit_i->sentries[segno];
324}
325
326static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
327						unsigned int segno)
328{
329	struct sit_info *sit_i = SIT_I(sbi);
330	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
331}
332
333static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
334				unsigned int segno, bool use_section)
335{
336	/*
337	 * In order to get # of valid blocks in a section instantly from many
338	 * segments, f2fs manages two counting structures separately.
339	 */
340	if (use_section && __is_large_section(sbi))
341		return get_sec_entry(sbi, segno)->valid_blocks;
342	else
343		return get_seg_entry(sbi, segno)->valid_blocks;
344}
345
346static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
347				unsigned int segno)
348{
349	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
350}
351
352static inline void seg_info_from_raw_sit(struct seg_entry *se,
353					struct f2fs_sit_entry *rs)
354{
355	se->valid_blocks = GET_SIT_VBLOCKS(rs);
356	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
357	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
358	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
359#ifdef CONFIG_F2FS_CHECK_FS
360	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
361#endif
362	se->type = GET_SIT_TYPE(rs);
363	se->mtime = le64_to_cpu(rs->mtime);
364}
365
366static inline void __seg_info_to_raw_sit(struct seg_entry *se,
367					struct f2fs_sit_entry *rs)
368{
369	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
370					se->valid_blocks;
371	rs->vblocks = cpu_to_le16(raw_vblocks);
372	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
373	rs->mtime = cpu_to_le64(se->mtime);
374}
375
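/*
 * The vblocks word packed above keeps the segment type in the high
 * bits and the valid-block count in the low bits. Assuming
 * SIT_VBLOCKS_SHIFT == 10 (matching the 10-bit valid_blocks field of
 * struct seg_entry), a hot node segment (type CURSEG_HOT_NODE == 3 in
 * the CURSEG ordering) with 100 valid blocks is stored as
 * (3 << 10) | 100 == 3172; GET_SIT_TYPE()/GET_SIT_VBLOCKS() recover
 * the two halves with a shift and a mask.
 */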
376static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
377				struct page *page, unsigned int start)
378{
379	struct f2fs_sit_block *raw_sit;
380	struct seg_entry *se;
381	struct f2fs_sit_entry *rs;
382	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
383					(unsigned long)MAIN_SEGS(sbi));
384	int i;
385
386	raw_sit = (struct f2fs_sit_block *)page_address(page);
387	memset(raw_sit, 0, PAGE_SIZE);
388	for (i = 0; i < end - start; i++) {
389		rs = &raw_sit->entries[i];
390		se = get_seg_entry(sbi, start + i);
391		__seg_info_to_raw_sit(se, rs);
392	}
393}
394
395static inline void seg_info_to_raw_sit(struct seg_entry *se,
396					struct f2fs_sit_entry *rs)
397{
398	__seg_info_to_raw_sit(se, rs);
399
400	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
401	se->ckpt_valid_blocks = se->valid_blocks;
402}
403
404static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
405		unsigned int max, unsigned int segno)
406{
407	unsigned int ret;
408	spin_lock(&free_i->segmap_lock);
409	ret = find_next_bit(free_i->free_segmap, max, segno);
410	spin_unlock(&free_i->segmap_lock);
411	return ret;
412}
413
414static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
415{
416	struct free_segmap_info *free_i = FREE_I(sbi);
417	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
418	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
419	unsigned int next;
420
421	spin_lock(&free_i->segmap_lock);
422	clear_bit(segno, free_i->free_segmap);
423	free_i->free_segments++;
424
425	next = find_next_bit(free_i->free_segmap,
426			start_segno + sbi->segs_per_sec, start_segno);
427	if (next >= start_segno + sbi->segs_per_sec) {
428		clear_bit(secno, free_i->free_secmap);
429		free_i->free_sections++;
430	}
431	spin_unlock(&free_i->segmap_lock);
432}
433
434static inline void __set_inuse(struct f2fs_sb_info *sbi,
435		unsigned int segno)
436{
437	struct free_segmap_info *free_i = FREE_I(sbi);
438	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
439
440	set_bit(segno, free_i->free_segmap);
441	free_i->free_segments--;
442	if (!test_and_set_bit(secno, free_i->free_secmap))
443		free_i->free_sections--;
444}
445
446static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
447		unsigned int segno)
448{
449	struct free_segmap_info *free_i = FREE_I(sbi);
450	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
451	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
452	unsigned int next;
453
454	spin_lock(&free_i->segmap_lock);
455	if (test_and_clear_bit(segno, free_i->free_segmap)) {
456		free_i->free_segments++;
457
458		if (IS_CURSEC(sbi, secno))
459			goto skip_free;
460		next = find_next_bit(free_i->free_segmap,
461				start_segno + sbi->segs_per_sec, start_segno);
462		if (next >= start_segno + sbi->segs_per_sec) {
463			if (test_and_clear_bit(secno, free_i->free_secmap))
464				free_i->free_sections++;
465		}
466	}
467skip_free:
468	spin_unlock(&free_i->segmap_lock);
469}
470
471static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
472		unsigned int segno)
473{
474	struct free_segmap_info *free_i = FREE_I(sbi);
475	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
476
477	spin_lock(&free_i->segmap_lock);
478	if (!test_and_set_bit(segno, free_i->free_segmap)) {
479		free_i->free_segments--;
480		if (!test_and_set_bit(secno, free_i->free_secmap))
481			free_i->free_sections--;
482	}
483	spin_unlock(&free_i->segmap_lock);
484}
485
486static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
487		void *dst_addr)
488{
489	struct sit_info *sit_i = SIT_I(sbi);
490
491#ifdef CONFIG_F2FS_CHECK_FS
492	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
493						sit_i->bitmap_size))
494		f2fs_bug_on(sbi, 1);
495#endif
496	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
497}
498
499static inline block_t written_block_count(struct f2fs_sb_info *sbi)
500{
501	return SIT_I(sbi)->written_valid_blocks;
502}
503
504static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
505{
506	return FREE_I(sbi)->free_segments;
507}
508
509static inline int reserved_segments(struct f2fs_sb_info *sbi)
510{
511	return SM_I(sbi)->reserved_segments;
512}
513
514static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
515{
516	return FREE_I(sbi)->free_sections;
517}
518
519static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
520{
521	return DIRTY_I(sbi)->nr_dirty[PRE];
522}
523
524static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
525{
526	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
527		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
528		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
529		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
530		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
531		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
532}
533
534static inline int overprovision_segments(struct f2fs_sb_info *sbi)
535{
536	return SM_I(sbi)->ovp_segments;
537}
538
539static inline int reserved_sections(struct f2fs_sb_info *sbi)
540{
541	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
542}
543
544static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
545{
546	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
547					get_pages(sbi, F2FS_DIRTY_DENTS);
548	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
549	unsigned int segno, left_blocks;
550	int i;
551
552	/* check current node segment */
553	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
554		segno = CURSEG_I(sbi, i)->segno;
555		left_blocks = sbi->blocks_per_seg -
556			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
557
558		if (node_blocks > left_blocks)
559			return false;
560	}
561
562	/* check current data segment */
563	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
564	left_blocks = sbi->blocks_per_seg -
565			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
566	if (dent_blocks > left_blocks)
567		return false;
568	return true;
569}
570
571static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
572					int freed, int needed)
573{
574	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
575	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
576	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
577
578	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
579		return false;
580
581	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
582			has_curseg_enough_space(sbi))
583		return false;
584	return (free_sections(sbi) + freed) <=
585		(node_secs + 2 * dent_secs + imeta_secs +
586		reserved_sections(sbi) + needed);
587}
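/*
 * Worked example for has_not_enough_free_secs() (counts invented):
 * with node_secs == 2, dent_secs == 1, imeta_secs == 1, 10 reserved
 * sections and freed == needed == 0, the threshold is
 * 2 + 2 * 1 + 1 + 10 == 15, so free-space pressure is reported while
 * free_sections(sbi) <= 15. Dentry sections are weighted twice,
 * presumably to leave headroom for the node updates they imply.
 */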
588
589static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
590{
591	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
592		return true;
593	if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
594		return true;
595	return false;
596}
597
598static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
599{
600	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
601}
602
603static inline int utilization(struct f2fs_sb_info *sbi)
604{
605	return div_u64((u64)valid_user_blocks(sbi) * 100,
606					sbi->user_block_count);
607}
608
609/*
610 * Sometimes it is better for f2fs to drop its out-of-place update policy,
611 * and users can control the policy through sysfs entries.
612 * There are six policies with triggering conditions as follows.
613 * F2FS_IPU_FORCE - all the time,
614 * F2FS_IPU_SSR - if SSR mode is activated,
615 * F2FS_IPU_UTIL - if FS utilization is over threshold,
616 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
617 *                     threshold,
618 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
619 *                     storages. IPU will be triggered only if the # of dirty
620 *                     pages exceeds min_fsync_blocks.
621 * F2FS_IPU_DISABLE - disable IPU. (=default option)
622 */
623#define DEF_MIN_IPU_UTIL	70
624#define DEF_MIN_FSYNC_BLOCKS	8
625#define DEF_MIN_HOT_BLOCKS	16
626
627#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */
628
629enum {
630	F2FS_IPU_FORCE,
631	F2FS_IPU_SSR,
632	F2FS_IPU_UTIL,
633	F2FS_IPU_SSR_UTIL,
634	F2FS_IPU_FSYNC,
635	F2FS_IPU_ASYNC,
636};
637
638static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
639		int type)
640{
641	struct curseg_info *curseg = CURSEG_I(sbi, type);
642	return curseg->segno;
643}
644
645static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
646		int type)
647{
648	struct curseg_info *curseg = CURSEG_I(sbi, type);
649	return curseg->alloc_type;
650}
651
652static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
653{
654	struct curseg_info *curseg = CURSEG_I(sbi, type);
655	return curseg->next_blkoff;
656}
657
658static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
659{
660	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
661}
662
663static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
664{
665	struct f2fs_sb_info *sbi = fio->sbi;
666
667	if (__is_valid_data_blkaddr(fio->old_blkaddr))
668		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
669					META_GENERIC : DATA_GENERIC);
670	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
671					META_GENERIC : DATA_GENERIC_ENHANCE);
672}
673
674/*
675 * Summary block is always treated as an invalid block
676 */
677static inline int check_block_count(struct f2fs_sb_info *sbi,
678		int segno, struct f2fs_sit_entry *raw_sit)
679{
680	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
681	int valid_blocks = 0;
682	int cur_pos = 0, next_pos;
683
684	/* check bitmap with valid block count */
685	do {
686		if (is_valid) {
687			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
688					sbi->blocks_per_seg,
689					cur_pos);
690			valid_blocks += next_pos - cur_pos;
691		} else
692			next_pos = find_next_bit_le(&raw_sit->valid_map,
693					sbi->blocks_per_seg,
694					cur_pos);
695		cur_pos = next_pos;
696		is_valid = !is_valid;
697	} while (cur_pos < sbi->blocks_per_seg);
698
699	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
700		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
701			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
702		set_sbi_flag(sbi, SBI_NEED_FSCK);
703		return -EFSCORRUPTED;
704	}
705
706	/* check segment usage, and check boundary of a given segment number */
707	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
708					|| segno > TOTAL_SEGS(sbi) - 1)) {
709		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
710			 GET_SIT_VBLOCKS(raw_sit), segno);
711		set_sbi_flag(sbi, SBI_NEED_FSCK);
712		return -EFSCORRUPTED;
713	}
714	return 0;
715}
716
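/*
 * How the alternating scan in check_block_count() adds up valid
 * blocks (illustrative 8-block segment, bitmap read LSB first as bits
 * 0..7 == 1,1,1,0,0,1,1,0): it walks run 0-2 (valid, +3), run 3-4
 * (invalid), run 5-6 (valid, +2) and run 7 (invalid), so
 * valid_blocks == 5, which must match GET_SIT_VBLOCKS(raw_sit) or the
 * SIT entry is treated as corrupted.
 */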
717static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
718						unsigned int start)
719{
720	struct sit_info *sit_i = SIT_I(sbi);
721	unsigned int offset = SIT_BLOCK_OFFSET(start);
722	block_t blk_addr = sit_i->sit_base_addr + offset;
723
724	check_seg_range(sbi, start);
725
726#ifdef CONFIG_F2FS_CHECK_FS
727	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
728			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
729		f2fs_bug_on(sbi, 1);
730#endif
731
732	/* calculate sit block address */
733	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
734		blk_addr += sit_i->sit_blocks;
735
736	return blk_addr;
737}
738
739static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
740						pgoff_t block_addr)
741{
742	struct sit_info *sit_i = SIT_I(sbi);
743	block_addr -= sit_i->sit_base_addr;
744	if (block_addr < sit_i->sit_blocks)
745		block_addr += sit_i->sit_blocks;
746	else
747		block_addr -= sit_i->sit_blocks;
748
749	return block_addr + sit_i->sit_base_addr;
750}
751
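/*
 * The SIT area holds two copies of every SIT block and flips between
 * them across checkpoints: current_sit_addr() picks the live copy via
 * sit_bitmap, and next_sit_addr() mirrors an address into the other
 * copy. Sketch with assumed sit_base_addr == 1000 and
 * sit_blocks == 64: address 1010 (offset 10, first copy) maps to
 * 1074, and 1074 maps back to 1010.
 */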
752static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
753{
754	unsigned int block_off = SIT_BLOCK_OFFSET(start);
755
756	f2fs_change_bit(block_off, sit_i->sit_bitmap);
757#ifdef CONFIG_F2FS_CHECK_FS
758	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
759#endif
760}
761
762static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
763						bool base_time)
764{
765	struct sit_info *sit_i = SIT_I(sbi);
766	time64_t diff, now = ktime_get_real_seconds();
767
768	if (now >= sit_i->mounted_time)
769		return sit_i->elapsed_time + now - sit_i->mounted_time;
770
771	/* system time is set to the past */
772	if (!base_time) {
773		diff = sit_i->mounted_time - now;
774		if (sit_i->elapsed_time >= diff)
775			return sit_i->elapsed_time - diff;
776		return 0;
777	}
778	return sit_i->elapsed_time;
779}
780
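/*
 * get_mtime() when the system clock moves backwards (values
 * invented): with elapsed_time == 50 and mounted_time == 1000, a now
 * of 1020 yields 50 + 20 == 70; if the clock is set back to 970, the
 * !base_time path clamps the result to 50 - 30 == 20 instead of
 * letting the segment mtime run backwards.
 */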
781static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
782			unsigned int ofs_in_node, unsigned char version)
783{
784	sum->nid = cpu_to_le32(nid);
785	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
786	sum->version = version;
787}
788
789static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
790{
791	return __start_cp_addr(sbi) +
792		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
793}
794
795static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
796{
797	return __start_cp_addr(sbi) +
798		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
799				- (base + 1) + type;
800}
801
802static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
803{
804	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
805		return true;
806	return false;
807}
808
809/*
810 * It is very important to gather dirty pages and write at once, so that we can
811 * submit a big bio without interfering with other data writes.
812 * By default, 512 pages for directory data,
813 * 512 pages (2MB) * 8 for nodes, and
814 * 256 pages * 8 for meta are set.
815 */
816static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
817{
818	if (sbi->sb->s_bdi->wb.dirty_exceeded)
819		return 0;
820
821	if (type == DATA)
822		return sbi->blocks_per_seg;
823	else if (type == NODE)
824		return 8 * sbi->blocks_per_seg;
825	else if (type == META)
826		return 8 * BIO_MAX_PAGES;
827	else
828		return 0;
829}
830
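/*
 * Plugging the defaults from the comment above into
 * nr_pages_to_skip() (2MB segments of 512 x 4KB blocks,
 * BIO_MAX_PAGES == 256): DATA skips up to 512 dirty pages, NODE up to
 * 8 * 512 == 4096, META up to 8 * 256 == 2048, and nothing is skipped
 * once the bdi reports dirty_exceeded.
 */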
831/*
832 * When writing pages, it is better to align nr_to_write to the segment size.
833 */
834static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
835					struct writeback_control *wbc)
836{
837	long nr_to_write, desired;
838
839	if (wbc->sync_mode != WB_SYNC_NONE)
840		return 0;
841
842	nr_to_write = wbc->nr_to_write;
843	desired = BIO_MAX_PAGES;
844	if (type == NODE)
845		desired <<= 1;
846
847	wbc->nr_to_write = desired;
848	return desired - nr_to_write;
849}
850
851static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
852{
853	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
854	bool wakeup = false;
855	int i;
856
857	if (force)
858		goto wake_up;
859
860	mutex_lock(&dcc->cmd_lock);
861	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
862		if (i + 1 < dcc->discard_granularity)
863			break;
864		if (!list_empty(&dcc->pend_list[i])) {
865			wakeup = true;
866			break;
867		}
868	}
869	mutex_unlock(&dcc->cmd_lock);
870	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
871		return;
872wake_up:
873	dcc->discard_wake = 1;
874	wake_up_interruptible_all(&dcc->discard_wait_queue);
875}
v6.8
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * fs/f2fs/segment.h
  4 *
  5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  6 *             http://www.samsung.com/
  7 */
  8#include <linux/blkdev.h>
  9#include <linux/backing-dev.h>
 10
 11/* constant macro */
 12#define NULL_SEGNO			((unsigned int)(~0))
 13#define NULL_SECNO			((unsigned int)(~0))
 14
 15#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
 16#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
 17
 18#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
 19#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */
 20
 21/* L: Logical segment # in volume, R: Relative segment # in main area */
 22#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
 23#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
 24
 25#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
 26#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
 27#define SE_PAGETYPE(se)	((IS_NODESEG((se)->type) ? NODE : DATA))
 28
 29static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
 30						unsigned short seg_type)
 31{
 32	f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
 33}
 34
 35#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
 36#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
 37#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
 38
 39#define IS_CURSEG(sbi, seg)						\
 40	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
 41	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
 42	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
 43	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
 44	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
 45	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) ||	\
 46	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) ||	\
 47	 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
 48
 49#define IS_CURSEC(sbi, secno)						\
 50	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
 51	  (sbi)->segs_per_sec) ||	\
 52	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
 53	  (sbi)->segs_per_sec) ||	\
 54	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
 55	  (sbi)->segs_per_sec) ||	\
 56	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
 57	  (sbi)->segs_per_sec) ||	\
 58	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
 59	  (sbi)->segs_per_sec) ||	\
 60	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
 61	  (sbi)->segs_per_sec) ||	\
 62	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno /	\
 63	  (sbi)->segs_per_sec) ||	\
 64	 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno /	\
 65	  (sbi)->segs_per_sec))
 66
 67#define MAIN_BLKADDR(sbi)						\
 68	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr : 				\
 69		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
 70#define SEG0_BLKADDR(sbi)						\
 71	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : 				\
 72		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
 73
 74#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
 75#define MAIN_SECS(sbi)	((sbi)->total_sections)
 76
 77#define TOTAL_SEGS(sbi)							\
 78	(SM_I(sbi) ? SM_I(sbi)->segment_count : 				\
 79		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
 80#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
 81
 82#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
 83#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
 84					(sbi)->log_blocks_per_seg))
 85
 86#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
 87	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
 88
 89#define NEXT_FREE_BLKADDR(sbi, curseg)					\
 90	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
 91
 92#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
 93#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
 94	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
 95#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
 96	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
 97
 98#define GET_SEGNO(sbi, blk_addr)					\
 99	((!__is_valid_data_blkaddr(blk_addr)) ?			\
100	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
101		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
102#define BLKS_PER_SEC(sbi)					\
103	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
104#define CAP_BLKS_PER_SEC(sbi)					\
105	((sbi)->segs_per_sec * (sbi)->blocks_per_seg -		\
106	 (sbi)->unusable_blocks_per_sec)
107#define CAP_SEGS_PER_SEC(sbi)					\
108	((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
109	(sbi)->log_blocks_per_seg))
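/*
 * CAP_BLKS_PER_SEC/CAP_SEGS_PER_SEC discount the blocks a zoned
 * device leaves unusable at the end of each section. Sketch with
 * assumed numbers: segs_per_sec == 8, blocks_per_seg == 512 and
 * unusable_blocks_per_sec == 1024 give 8 * 512 - 1024 == 3072 usable
 * blocks, i.e. 8 - (1024 >> 9) == 6 usable segments per section.
 */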
110#define GET_SEC_FROM_SEG(sbi, segno)				\
111	(((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
112#define GET_SEG_FROM_SEC(sbi, secno)				\
113	((secno) * (sbi)->segs_per_sec)
114#define GET_ZONE_FROM_SEC(sbi, secno)				\
115	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
116#define GET_ZONE_FROM_SEG(sbi, segno)				\
117	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
118
119#define GET_SUM_BLOCK(sbi, segno)				\
120	((sbi)->sm_info->ssa_blkaddr + (segno))
121
122#define GET_SUM_TYPE(footer) ((footer)->entry_type)
123#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
124
125#define SIT_ENTRY_OFFSET(sit_i, segno)					\
126	((segno) % (sit_i)->sents_per_block)
127#define SIT_BLOCK_OFFSET(segno)					\
128	((segno) / SIT_ENTRY_PER_BLOCK)
129#define	START_SEGNO(segno)		\
130	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
131#define SIT_BLK_CNT(sbi)			\
132	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
133#define f2fs_bitmap_size(nr)			\
134	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
135
136#define SECTOR_FROM_BLOCK(blk_addr)					\
137	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
138#define SECTOR_TO_BLOCK(sectors)					\
139	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
140
141/*
142 * Indicates the block allocation direction: RIGHT or LEFT.
143 * RIGHT means allocating new sections towards the end of the volume.
144 * LEFT means the opposite direction.
145 */
146enum {
147	ALLOC_RIGHT = 0,
148	ALLOC_LEFT
149};
150
151/*
152 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
153 * LFS writes data sequentially with cleaning operations.
154 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
155 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into
156 * a fragmented segment which has a similar aging degree.
157 */
158enum {
159	LFS = 0,
160	SSR,
161	AT_SSR,
162};
163
164/*
165 * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
166 * GC_CB is based on cost-benefit algorithm.
167 * GC_GREEDY is based on greedy algorithm.
168 * GC_AT is based on age-threshold algorithm.
169 */
170enum {
171	GC_CB = 0,
172	GC_GREEDY,
173	GC_AT,
174	ALLOC_NEXT,
175	FLUSH_DEVICE,
176	MAX_GC_POLICY,
177};
178
179/*
180 * BG_GC means the background cleaning job.
181 * FG_GC means the on-demand cleaning job.
182 */
183enum {
184	BG_GC = 0,
185	FG_GC,
186};
187
188/* for a function parameter to select a victim segment */
189struct victim_sel_policy {
190	int alloc_mode;			/* LFS or SSR */
191	int gc_mode;			/* GC_CB or GC_GREEDY */
192	unsigned long *dirty_bitmap;	/* dirty segment/section bitmap */
193	unsigned int max_search;	/*
194					 * maximum # of segments/sections
195					 * to search
196					 */
197	unsigned int offset;		/* last scanned bitmap offset */
198	unsigned int ofs_unit;		/* bitmap search unit */
199	unsigned int min_cost;		/* minimum cost */
200	unsigned long long oldest_age;	/* oldest age of segments having the same min cost */
201	unsigned int min_segno;		/* segment # having min. cost */
202	unsigned long long age;		/* mtime of GCed section */
203	unsigned long long age_threshold;/* age threshold */
204};
205
206struct seg_entry {
207	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
208	unsigned int valid_blocks:10;	/* # of valid blocks */
209	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
210	unsigned int padding:6;		/* padding */
211	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
212#ifdef CONFIG_F2FS_CHECK_FS
213	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
214#endif
215	/*
216	 * # of valid blocks and the validity bitmap stored in the last
217	 * checkpoint pack. This information is used by the SSR mode.
218	 */
219	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
220	unsigned char *discard_map;
221	unsigned long long mtime;	/* modification time of the segment */
222};
223
224struct sec_entry {
225	unsigned int valid_blocks;	/* # of valid blocks in a section */
226};
227
228#define MAX_SKIP_GC_COUNT			16
229
230struct revoke_entry {
231	struct list_head list;
232	block_t old_addr;		/* for revoking when fail to commit */
233	pgoff_t index;
234};
235
236struct sit_info {
237	block_t sit_base_addr;		/* start block address of SIT area */
238	block_t sit_blocks;		/* # of blocks used by SIT area */
239	block_t written_valid_blocks;	/* # of valid blocks in main area */
240	char *bitmap;			/* all bitmaps pointer */
241	char *sit_bitmap;		/* SIT bitmap pointer */
242#ifdef CONFIG_F2FS_CHECK_FS
243	char *sit_bitmap_mir;		/* SIT bitmap mirror */
244
245	/* bitmap of segments to be ignored by GC in case of errors */
246	unsigned long *invalid_segmap;
247#endif
248	unsigned int bitmap_size;	/* SIT bitmap size */
249
250	unsigned long *tmp_map;			/* bitmap for temporary use */
251	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
252	unsigned int dirty_sentries;		/* # of dirty sentries */
253	unsigned int sents_per_block;		/* # of SIT entries per block */
254	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
255	struct seg_entry *sentries;		/* SIT segment-level cache */
256	struct sec_entry *sec_entries;		/* SIT section-level cache */
257
258	/* for cost-benefit algorithm in cleaning procedure */
259	unsigned long long elapsed_time;	/* elapsed time after mount */
260	unsigned long long mounted_time;	/* mount time */
261	unsigned long long min_mtime;		/* min. modification time */
262	unsigned long long max_mtime;		/* max. modification time */
263	unsigned long long dirty_min_mtime;	/* rearrange candidates in GC_AT */
264	unsigned long long dirty_max_mtime;	/* rearrange candidates in GC_AT */
265
266	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
267};
268
269struct free_segmap_info {
270	unsigned int start_segno;	/* start segment number logically */
271	unsigned int free_segments;	/* # of free segments */
272	unsigned int free_sections;	/* # of free sections */
273	spinlock_t segmap_lock;		/* free segmap lock */
274	unsigned long *free_segmap;	/* free segment bitmap */
275	unsigned long *free_secmap;	/* free section bitmap */
276};
277
278/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
279enum dirty_type {
280	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
281	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
282	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
283	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
284	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
285	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
286	DIRTY,			/* to count # of dirty segments */
287	PRE,			/* to count # of entirely obsolete segments */
288	NR_DIRTY_TYPE
289};
290
291struct dirty_seglist_info {
292	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
293	unsigned long *dirty_secmap;
294	struct mutex seglist_lock;		/* lock for segment bitmaps */
295	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
296	unsigned long *victim_secmap;		/* background GC victims */
297	unsigned long *pinned_secmap;		/* pinned victims from foreground GC */
298	unsigned int pinned_secmap_cnt;		/* count of victims which has pinned data */
299	bool enable_pin_section;		/* enable pinning section */
300};
301
302/* for active log information */
303struct curseg_info {
304	struct mutex curseg_mutex;		/* lock for consistency */
305	struct f2fs_summary_block *sum_blk;	/* cached summary block */
306	struct rw_semaphore journal_rwsem;	/* protect journal area */
307	struct f2fs_journal *journal;		/* cached journal info */
308	unsigned char alloc_type;		/* current allocation type */
309	unsigned short seg_type;		/* segment type like CURSEG_XXX_TYPE */
310	unsigned int segno;			/* current segment number */
311	unsigned short next_blkoff;		/* next block offset to write */
312	unsigned int zone;			/* current zone number */
313	unsigned int next_segno;		/* preallocated segment */
314	int fragment_remained_chunk;		/* remained block size in a chunk for block fragmentation mode */
315	bool inited;				/* indicate inmem log is inited */
316};
317
318struct sit_entry_set {
319	struct list_head set_list;	/* link with all sit sets */
320	unsigned int start_segno;	/* start segno of sits in set */
321	unsigned int entry_cnt;		/* the # of sit entries in set */
322};
323
324/*
325 * inline functions
326 */
327static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
328{
329	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
330}
331
332static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
333						unsigned int segno)
334{
335	struct sit_info *sit_i = SIT_I(sbi);
336	return &sit_i->sentries[segno];
337}
338
339static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
340						unsigned int segno)
341{
342	struct sit_info *sit_i = SIT_I(sbi);
343	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
344}
345
346static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
347				unsigned int segno, bool use_section)
348{
349	/*
350	 * In order to get # of valid blocks in a section instantly from many
351	 * segments, f2fs manages two counting structures separately.
352	 */
353	if (use_section && __is_large_section(sbi))
354		return get_sec_entry(sbi, segno)->valid_blocks;
355	else
356		return get_seg_entry(sbi, segno)->valid_blocks;
357}
358
359static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
360				unsigned int segno, bool use_section)
361{
362	if (use_section && __is_large_section(sbi)) {
363		unsigned int start_segno = START_SEGNO(segno);
364		unsigned int blocks = 0;
365		int i;
366
367		for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
368			struct seg_entry *se = get_seg_entry(sbi, start_segno);
369
370			blocks += se->ckpt_valid_blocks;
371		}
372		return blocks;
373	}
374	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
375}
376
377static inline void seg_info_from_raw_sit(struct seg_entry *se,
378					struct f2fs_sit_entry *rs)
379{
380	se->valid_blocks = GET_SIT_VBLOCKS(rs);
381	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
382	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
383	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
384#ifdef CONFIG_F2FS_CHECK_FS
385	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
386#endif
387	se->type = GET_SIT_TYPE(rs);
388	se->mtime = le64_to_cpu(rs->mtime);
389}
390
391static inline void __seg_info_to_raw_sit(struct seg_entry *se,
392					struct f2fs_sit_entry *rs)
393{
394	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
395					se->valid_blocks;
396	rs->vblocks = cpu_to_le16(raw_vblocks);
397	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
398	rs->mtime = cpu_to_le64(se->mtime);
399}
400
401static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
402				struct page *page, unsigned int start)
403{
404	struct f2fs_sit_block *raw_sit;
405	struct seg_entry *se;
406	struct f2fs_sit_entry *rs;
407	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
408					(unsigned long)MAIN_SEGS(sbi));
409	int i;
410
411	raw_sit = (struct f2fs_sit_block *)page_address(page);
412	memset(raw_sit, 0, PAGE_SIZE);
413	for (i = 0; i < end - start; i++) {
414		rs = &raw_sit->entries[i];
415		se = get_seg_entry(sbi, start + i);
416		__seg_info_to_raw_sit(se, rs);
417	}
418}
419
420static inline void seg_info_to_raw_sit(struct seg_entry *se,
421					struct f2fs_sit_entry *rs)
422{
423	__seg_info_to_raw_sit(se, rs);
424
425	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
426	se->ckpt_valid_blocks = se->valid_blocks;
427}
428
429static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
430		unsigned int max, unsigned int segno)
431{
432	unsigned int ret;
433	spin_lock(&free_i->segmap_lock);
434	ret = find_next_bit(free_i->free_segmap, max, segno);
435	spin_unlock(&free_i->segmap_lock);
436	return ret;
437}
438
439static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
440{
441	struct free_segmap_info *free_i = FREE_I(sbi);
442	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
443	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
444	unsigned int next;
445	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
446
447	spin_lock(&free_i->segmap_lock);
448	clear_bit(segno, free_i->free_segmap);
449	free_i->free_segments++;
450
451	next = find_next_bit(free_i->free_segmap,
452			start_segno + sbi->segs_per_sec, start_segno);
453	if (next >= start_segno + usable_segs) {
454		clear_bit(secno, free_i->free_secmap);
455		free_i->free_sections++;
456	}
457	spin_unlock(&free_i->segmap_lock);
458}
459
460static inline void __set_inuse(struct f2fs_sb_info *sbi,
461		unsigned int segno)
462{
463	struct free_segmap_info *free_i = FREE_I(sbi);
464	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
465
466	set_bit(segno, free_i->free_segmap);
467	free_i->free_segments--;
468	if (!test_and_set_bit(secno, free_i->free_secmap))
469		free_i->free_sections--;
470}
471
472static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
473		unsigned int segno, bool inmem)
474{
475	struct free_segmap_info *free_i = FREE_I(sbi);
476	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
477	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
478	unsigned int next;
479	unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
480
481	spin_lock(&free_i->segmap_lock);
482	if (test_and_clear_bit(segno, free_i->free_segmap)) {
483		free_i->free_segments++;
484
485		if (!inmem && IS_CURSEC(sbi, secno))
486			goto skip_free;
487		next = find_next_bit(free_i->free_segmap,
488				start_segno + sbi->segs_per_sec, start_segno);
489		if (next >= start_segno + usable_segs) {
490			if (test_and_clear_bit(secno, free_i->free_secmap))
491				free_i->free_sections++;
492		}
493	}
494skip_free:
495	spin_unlock(&free_i->segmap_lock);
496}
497
498static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
499		unsigned int segno)
500{
501	struct free_segmap_info *free_i = FREE_I(sbi);
502	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
503
504	spin_lock(&free_i->segmap_lock);
505	if (!test_and_set_bit(segno, free_i->free_segmap)) {
506		free_i->free_segments--;
507		if (!test_and_set_bit(secno, free_i->free_secmap))
508			free_i->free_sections--;
509	}
510	spin_unlock(&free_i->segmap_lock);
511}
512
513static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
514		void *dst_addr)
515{
516	struct sit_info *sit_i = SIT_I(sbi);
517
518#ifdef CONFIG_F2FS_CHECK_FS
519	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
520						sit_i->bitmap_size))
521		f2fs_bug_on(sbi, 1);
522#endif
523	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
524}
525
526static inline block_t written_block_count(struct f2fs_sb_info *sbi)
527{
528	return SIT_I(sbi)->written_valid_blocks;
529}
530
531static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
532{
533	return FREE_I(sbi)->free_segments;
534}
535
536static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
537{
538	return SM_I(sbi)->reserved_segments +
539			SM_I(sbi)->additional_reserved_segments;
540}
541
542static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
543{
544	return FREE_I(sbi)->free_sections;
545}
546
547static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
548{
549	return DIRTY_I(sbi)->nr_dirty[PRE];
550}
551
552static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
553{
554	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
555		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
556		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
557		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
558		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
559		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
560}
561
562static inline int overprovision_segments(struct f2fs_sb_info *sbi)
563{
564	return SM_I(sbi)->ovp_segments;
565}
566
567static inline int reserved_sections(struct f2fs_sb_info *sbi)
568{
569	return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
570}
571
572static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
573			unsigned int node_blocks, unsigned int dent_blocks)
574{
575
576	unsigned int segno, left_blocks;
577	int i;
578
579	/* check current node segment */
580	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
581		segno = CURSEG_I(sbi, i)->segno;
582		left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
583				get_seg_entry(sbi, segno)->ckpt_valid_blocks;
584
585		if (node_blocks > left_blocks)
586			return false;
587	}
588
589	/* check current data segment */
590	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
591	left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
592			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
593	if (dent_blocks > left_blocks)
594		return false;
595	return true;
596}
597
598/*
599 * calculate needed sections for dirty node/dentry
600 * and call has_curseg_enough_space
601 */
602static inline void __get_secs_required(struct f2fs_sb_info *sbi,
603		unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
604{
605	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
606					get_pages(sbi, F2FS_DIRTY_DENTS) +
607					get_pages(sbi, F2FS_DIRTY_IMETA);
608	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
609	unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
610	unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
611	unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
612	unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
613
614	if (lower_p)
615		*lower_p = node_secs + dent_secs;
616	if (upper_p)
617		*upper_p = node_secs + dent_secs +
618			(node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
619	if (curseg_p)
620		*curseg_p = has_curseg_enough_space(sbi,
621				node_blocks, dent_blocks);
622}
623
624static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
625					int freed, int needed)
626{
627	unsigned int free_secs, lower_secs, upper_secs;
628	bool curseg_space;
629
630	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
631		return false;
632
633	__get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);
634
635	free_secs = free_sections(sbi) + freed;
636	lower_secs += needed + reserved_sections(sbi);
637	upper_secs += needed + reserved_sections(sbi);
638
639	if (free_secs > upper_secs)
640		return false;
641	else if (free_secs <= lower_secs)
642		return true;
643	return !curseg_space;
644}
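/*
 * Worked pass through __get_secs_required() and
 * has_not_enough_free_secs() (counts invented, CAP_BLKS_PER_SEC ==
 * 1024): 2600 dirty node/dent/imeta blocks and 300 dirty dentry
 * blocks give node_secs == 2 with node_blocks == 552 left over, and
 * dent_secs == 0 with dent_blocks == 300, so lower == 2 and
 * upper == 2 + 1 + 1 == 4. With reserved_sections() == 10 and
 * needed == 0, more than 14 free sections is safe, 12 or fewer is
 * not, and 13..14 defer to has_curseg_enough_space() to see whether
 * the open logs can absorb the leftover blocks.
 */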
645
646static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
647					int freed, int needed)
648{
649	return !has_not_enough_free_secs(sbi, freed, needed);
650}
651
652static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
653{
654	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
655		return true;
656	if (likely(has_enough_free_secs(sbi, 0, 0)))
657		return true;
658	return false;
659}
660
661static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
662{
663	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
664}
665
666static inline int utilization(struct f2fs_sb_info *sbi)
667{
668	return div_u64((u64)valid_user_blocks(sbi) * 100,
669					sbi->user_block_count);
670}
671
672/*
673 * Sometimes it is better for f2fs to drop its out-of-place update policy,
674 * and users can control the policy through sysfs entries.
675 * There are nine policies with triggering conditions as follows.
676 * F2FS_IPU_FORCE - all the time,
677 * F2FS_IPU_SSR - if SSR mode is activated,
678 * F2FS_IPU_UTIL - if FS utilization is over threshold,
679 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
680 *                     threshold,
681 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
682 *                     storages. IPU will be triggered only if the # of dirty
683 *                     pages exceeds min_fsync_blocks. (=default option)
684 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
685 * F2FS_IPU_NOCACHE - disable IPU bio cache.
686 * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
687 *                            FI_OPU_WRITE flag.
688 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
689 */
690#define DEF_MIN_IPU_UTIL	70
691#define DEF_MIN_FSYNC_BLOCKS	8
692#define DEF_MIN_HOT_BLOCKS	16
693
694#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */
695
696#define F2FS_IPU_DISABLE	0
697
698/* Modification on enum should be synchronized with ipu_mode_names array */
699enum {
700	F2FS_IPU_FORCE,
701	F2FS_IPU_SSR,
702	F2FS_IPU_UTIL,
703	F2FS_IPU_SSR_UTIL,
704	F2FS_IPU_FSYNC,
705	F2FS_IPU_ASYNC,
706	F2FS_IPU_NOCACHE,
707	F2FS_IPU_HONOR_OPU_WRITE,
708	F2FS_IPU_MAX,
709};
710
711static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
712{
713	return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
714}
715
716#define F2FS_IPU_POLICY(name)					\
717static inline bool IS_##name(struct f2fs_sb_info *sbi)		\
718{								\
719	return SM_I(sbi)->ipu_policy & BIT(name);		\
720}
721
722F2FS_IPU_POLICY(F2FS_IPU_FORCE);
723F2FS_IPU_POLICY(F2FS_IPU_SSR);
724F2FS_IPU_POLICY(F2FS_IPU_UTIL);
725F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
726F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
727F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
728F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
729F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
730
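/*
 * Each F2FS_IPU_POLICY() line above stamps out a bit-test helper, so
 * the sysfs ipu_policy value is a bitmask; IS_F2FS_IPU_FORCE(), for
 * instance, expands to:
 *
 *	static inline bool IS_F2FS_IPU_FORCE(struct f2fs_sb_info *sbi)
 *	{
 *		return SM_I(sbi)->ipu_policy & BIT(F2FS_IPU_FORCE);
 *	}
 *
 * and a policy of BIT(F2FS_IPU_SSR) | BIT(F2FS_IPU_FSYNC) enables
 * exactly those two triggers.
 */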
731static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
732		int type)
733{
734	struct curseg_info *curseg = CURSEG_I(sbi, type);
735	return curseg->segno;
736}
737
738static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
739		int type)
740{
741	struct curseg_info *curseg = CURSEG_I(sbi, type);
742	return curseg->alloc_type;
743}
744
745static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
746		unsigned int segno)
747{
748	return segno <= (MAIN_SEGS(sbi) - 1);
749}
750
751static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
752{
753	struct f2fs_sb_info *sbi = fio->sbi;
754
755	if (__is_valid_data_blkaddr(fio->old_blkaddr))
756		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
757					META_GENERIC : DATA_GENERIC);
758	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
759					META_GENERIC : DATA_GENERIC_ENHANCE);
760}
761
762/*
763 * Summary block is always treated as an invalid block
764 */
765static inline int check_block_count(struct f2fs_sb_info *sbi,
766		int segno, struct f2fs_sit_entry *raw_sit)
767{
768	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
769	int valid_blocks = 0;
770	int cur_pos = 0, next_pos;
771	unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);
772
773	/* check bitmap with valid block count */
774	do {
775		if (is_valid) {
776			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
777					usable_blks_per_seg,
778					cur_pos);
779			valid_blocks += next_pos - cur_pos;
780		} else
781			next_pos = find_next_bit_le(&raw_sit->valid_map,
782					usable_blks_per_seg,
783					cur_pos);
784		cur_pos = next_pos;
785		is_valid = !is_valid;
786	} while (cur_pos < usable_blks_per_seg);
787
788	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
789		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
790			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
791		set_sbi_flag(sbi, SBI_NEED_FSCK);
792		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
793		return -EFSCORRUPTED;
794	}
795
796	if (usable_blks_per_seg < sbi->blocks_per_seg)
797		f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
798				sbi->blocks_per_seg,
799				usable_blks_per_seg) != sbi->blocks_per_seg);
800
801	/* check segment usage, and check boundary of a given segment number */
802	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
803					|| !valid_main_segno(sbi, segno))) {
804		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
805			 GET_SIT_VBLOCKS(raw_sit), segno);
806		set_sbi_flag(sbi, SBI_NEED_FSCK);
807		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
808		return -EFSCORRUPTED;
809	}
810	return 0;
811}
812
813static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
814						unsigned int start)
815{
816	struct sit_info *sit_i = SIT_I(sbi);
817	unsigned int offset = SIT_BLOCK_OFFSET(start);
818	block_t blk_addr = sit_i->sit_base_addr + offset;
819
820	f2fs_bug_on(sbi, !valid_main_segno(sbi, start));
821
822#ifdef CONFIG_F2FS_CHECK_FS
823	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
824			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
825		f2fs_bug_on(sbi, 1);
826#endif
827
828	/* calculate sit block address */
829	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
830		blk_addr += sit_i->sit_blocks;
831
832	return blk_addr;
833}
834
835static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
836						pgoff_t block_addr)
837{
838	struct sit_info *sit_i = SIT_I(sbi);
839	block_addr -= sit_i->sit_base_addr;
840	if (block_addr < sit_i->sit_blocks)
841		block_addr += sit_i->sit_blocks;
842	else
843		block_addr -= sit_i->sit_blocks;
844
845	return block_addr + sit_i->sit_base_addr;
846}
847
848static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
849{
850	unsigned int block_off = SIT_BLOCK_OFFSET(start);
851
852	f2fs_change_bit(block_off, sit_i->sit_bitmap);
853#ifdef CONFIG_F2FS_CHECK_FS
854	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
855#endif
856}
857
858static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
859						bool base_time)
860{
861	struct sit_info *sit_i = SIT_I(sbi);
862	time64_t diff, now = ktime_get_boottime_seconds();
863
864	if (now >= sit_i->mounted_time)
865		return sit_i->elapsed_time + now - sit_i->mounted_time;
866
867	/* system time is set to the past */
868	if (!base_time) {
869		diff = sit_i->mounted_time - now;
870		if (sit_i->elapsed_time >= diff)
871			return sit_i->elapsed_time - diff;
872		return 0;
873	}
874	return sit_i->elapsed_time;
875}
876
877static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
878			unsigned int ofs_in_node, unsigned char version)
879{
880	sum->nid = cpu_to_le32(nid);
881	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
882	sum->version = version;
883}
884
885static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
886{
887	return __start_cp_addr(sbi) +
888		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
889}
890
891static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
892{
893	return __start_cp_addr(sbi) +
894		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
895				- (base + 1) + type;
896}
897
898static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
899{
900	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
901		return true;
902	return false;
903}
904
905/*
906 * It is very important to gather dirty pages and write at once, so that we can
907 * submit a big bio without interfering with other data writes.
908 * By default, 512 pages for directory data,
909 * 512 pages (2MB) * 8 for nodes, and
910 * 256 pages * 8 for meta are set.
911 */
912static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
913{
914	if (sbi->sb->s_bdi->wb.dirty_exceeded)
915		return 0;
916
917	if (type == DATA)
918		return sbi->blocks_per_seg;
919	else if (type == NODE)
920		return 8 * sbi->blocks_per_seg;
921	else if (type == META)
922		return 8 * BIO_MAX_VECS;
923	else
924		return 0;
925}
926
927/*
928 * When writing pages, it is better to align nr_to_write to the segment size.
929 */
930static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
931					struct writeback_control *wbc)
932{
933	long nr_to_write, desired;
934
935	if (wbc->sync_mode != WB_SYNC_NONE)
936		return 0;
937
938	nr_to_write = wbc->nr_to_write;
939	desired = BIO_MAX_VECS;
940	if (type == NODE)
941		desired <<= 1;
942
943	wbc->nr_to_write = desired;
944	return desired - nr_to_write;
945}
946
947static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
948{
949	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
950	bool wakeup = false;
951	int i;
952
953	if (force)
954		goto wake_up;
955
956	mutex_lock(&dcc->cmd_lock);
957	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
958		if (i + 1 < dcc->discard_granularity)
959			break;
960		if (!list_empty(&dcc->pend_list[i])) {
961			wakeup = true;
962			break;
963		}
964	}
965	mutex_unlock(&dcc->cmd_lock);
966	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
967		return;
968wake_up:
969	dcc->discard_wake = true;
970	wake_up_interruptible_all(&dcc->discard_wait_queue);
971}