1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * fs/f2fs/segment.h
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/blkdev.h>
9#include <linux/backing-dev.h>
10
11/* constant macro */
12#define NULL_SEGNO ((unsigned int)(~0))
13#define NULL_SECNO ((unsigned int)(~0))
14
15#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
16#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */
17
18#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
19#define F2FS_MIN_META_SEGMENTS 8 /* SB + 2 (CP + SIT + NAT) + SSA */
20
21/* L: Logical segment # in volume, R: Relative segment # in main area */
22#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
23#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)
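/*
 * Illustrative example (not part of the original source): if the main area
 * starts at segment #512, the two conversions behave as follows.
 *
 *	free_i->start_segno == 512;
 *	GET_L2R_SEGNO(free_i, 900);	// main-area-relative segno 388
 *	GET_R2L_SEGNO(free_i, 388);	// logical (volume-wide) segno 900
 */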
24
25#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
26#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
27
28static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
29 unsigned short seg_type)
30{
31 f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
32}
33
34#define IS_HOT(t) ((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
35#define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
36#define IS_COLD(t) ((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
37
38#define IS_CURSEG(sbi, seg) \
39 (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
40 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
41 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
42 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
43 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
44 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \
45 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \
46 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
47
48#define IS_CURSEC(sbi, secno) \
49 (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
50 (sbi)->segs_per_sec) || \
51 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
52 (sbi)->segs_per_sec) || \
53 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
54 (sbi)->segs_per_sec) || \
55 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
56 (sbi)->segs_per_sec) || \
57 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
58 (sbi)->segs_per_sec) || \
59 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
60 (sbi)->segs_per_sec) || \
61 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
62 (sbi)->segs_per_sec) || \
63 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
64 (sbi)->segs_per_sec))
65
66#define MAIN_BLKADDR(sbi) \
67 (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
68 le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
69#define SEG0_BLKADDR(sbi) \
70 (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
71 le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
72
73#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
74#define MAIN_SECS(sbi) ((sbi)->total_sections)
75
76#define TOTAL_SEGS(sbi) \
77 (SM_I(sbi) ? SM_I(sbi)->segment_count : \
78 le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
79#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
80
81#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
82#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
83 (sbi)->log_blocks_per_seg))
84
85#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
86 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
87
88#define NEXT_FREE_BLKADDR(sbi, curseg) \
89 (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
90
91#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
92#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
93 (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
94#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
95 (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
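/*
 * Illustrative example with assumed geometry (not from the original source):
 * with 512 blocks per segment and SEG0_BLKADDR(sbi) == 512, block address
 * 2000 decomposes as follows.
 *
 *	GET_SEGOFF_FROM_SEG0(sbi, 2000);	// 1488 blocks past segment 0
 *	GET_SEGNO_FROM_SEG0(sbi, 2000);		// segment 2, counted from segment 0
 *	GET_BLKOFF_FROM_SEG0(sbi, 2000);	// block 464 inside that segment
 */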
96
97#define GET_SEGNO(sbi, blk_addr) \
98 ((!__is_valid_data_blkaddr(blk_addr)) ? \
99 NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
100 GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
101#define BLKS_PER_SEC(sbi) \
102 ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
103#define GET_SEC_FROM_SEG(sbi, segno) \
104 (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
105#define GET_SEG_FROM_SEC(sbi, secno) \
106 ((secno) * (sbi)->segs_per_sec)
107#define GET_ZONE_FROM_SEC(sbi, secno) \
108 (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
109#define GET_ZONE_FROM_SEG(sbi, segno) \
110 GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
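/*
 * Illustrative example with assumed geometry (not from the original source):
 * with blocks_per_seg = 512, segs_per_sec = 4 and secs_per_zone = 2,
 *
 *	BLKS_PER_SEC(sbi);		// 4 * 512 = 2048 blocks per section
 *	GET_SEC_FROM_SEG(sbi, 10);	// segment 10 belongs to section 2
 *	GET_SEG_FROM_SEC(sbi, 2);	// section 2 starts at segment 8
 *	GET_ZONE_FROM_SEG(sbi, 10);	// section 2 belongs to zone 1
 */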
111
112#define GET_SUM_BLOCK(sbi, segno) \
113 ((sbi)->sm_info->ssa_blkaddr + (segno))
114
115#define GET_SUM_TYPE(footer) ((footer)->entry_type)
116#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
117
118#define SIT_ENTRY_OFFSET(sit_i, segno) \
119 ((segno) % (sit_i)->sents_per_block)
120#define SIT_BLOCK_OFFSET(segno) \
121 ((segno) / SIT_ENTRY_PER_BLOCK)
122#define START_SEGNO(segno) \
123 (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
124#define SIT_BLK_CNT(sbi) \
125 DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
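/*
 * Illustrative example (not from the original source), assuming the usual
 * SIT_ENTRY_PER_BLOCK of 55 entries per 4KB SIT block: segment #137 maps to
 *
 *	SIT_BLOCK_OFFSET(137);		// SIT block 2 holds this entry
 *	START_SEGNO(137);		// that block covers segments 110..164
 *	SIT_ENTRY_OFFSET(sit_i, 137);	// entry 27 inside the block
 */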
126#define f2fs_bitmap_size(nr) \
127 (BITS_TO_LONGS(nr) * sizeof(unsigned long))
128
129#define SECTOR_FROM_BLOCK(blk_addr) \
130 (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
131#define SECTOR_TO_BLOCK(sectors) \
132 ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
133
134/*
135 * indicates the block allocation direction: RIGHT and LEFT.
136 * RIGHT means allocating new sections towards the end of volume.
137 * LEFT means the opposite direction.
138 */
139enum {
140 ALLOC_RIGHT = 0,
141 ALLOC_LEFT
142};
143
144/*
145 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
146 * LFS writes data sequentially with cleaning operations.
147 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
148 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragmented blocks
149 * into a segment whose existing blocks have a similar aging degree.
150 */
151enum {
152 LFS = 0,
153 SSR,
154 AT_SSR,
155};
156
157/*
158 * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
159 * GC_CB is based on the cost-benefit algorithm.
160 * GC_GREEDY is based on the greedy algorithm.
161 * GC_AT is based on the age-threshold algorithm.
162 */
163enum {
164 GC_CB = 0,
165 GC_GREEDY,
166 GC_AT,
167 ALLOC_NEXT,
168 FLUSH_DEVICE,
169 MAX_GC_POLICY,
170};
171
172/*
173 * BG_GC means the background cleaning job.
174 * FG_GC means the on-demand cleaning job.
175 */
176enum {
177 BG_GC = 0,
178 FG_GC,
179};
180
181/* for a function parameter to select a victim segment */
182struct victim_sel_policy {
183	int alloc_mode;			/* LFS, SSR or AT_SSR */
184	int gc_mode;			/* GC_CB, GC_GREEDY or GC_AT */
185 unsigned long *dirty_bitmap; /* dirty segment/section bitmap */
186 unsigned int max_search; /*
187 * maximum # of segments/sections
188 * to search
189 */
190 unsigned int offset; /* last scanned bitmap offset */
191 unsigned int ofs_unit; /* bitmap search unit */
192 unsigned int min_cost; /* minimum cost */
193 unsigned long long oldest_age; /* oldest age of segments having the same min cost */
194 unsigned int min_segno; /* segment # having min. cost */
195	unsigned long long age;		/* mtime of GCed section */
196 unsigned long long age_threshold;/* age threshold */
197};
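/*
 * Minimal sketch of how a caller could fill in a policy before victim
 * selection (illustrative only; the real setup is done by the GC code in
 * gc.c, which also derives max_search and min_cost dynamically):
 *
 *	struct victim_sel_policy p = {
 *		.alloc_mode	= LFS,
 *		.gc_mode	= GC_GREEDY,
 *		.dirty_bitmap	= DIRTY_I(sbi)->dirty_segmap[DIRTY],
 *		.max_search	= MAIN_SEGS(sbi),
 *		.ofs_unit	= 1,
 *		.min_segno	= NULL_SEGNO,
 *	};
 */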
198
199struct seg_entry {
200 unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
201 unsigned int valid_blocks:10; /* # of valid blocks */
202 unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
203 unsigned int padding:6; /* padding */
204 unsigned char *cur_valid_map; /* validity bitmap of blocks */
205#ifdef CONFIG_F2FS_CHECK_FS
206 unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */
207#endif
208 /*
209 * # of valid blocks and the validity bitmap stored in the last
210 * checkpoint pack. This information is used by the SSR mode.
211 */
212 unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
213 unsigned char *discard_map;
214 unsigned long long mtime; /* modification time of the segment */
215};
216
217struct sec_entry {
218 unsigned int valid_blocks; /* # of valid blocks in a section */
219};
220
221struct segment_allocation {
222 void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
223};
224
225#define MAX_SKIP_GC_COUNT 16
226
227struct inmem_pages {
228 struct list_head list;
229 struct page *page;
230 block_t old_addr; /* for revoking when fail to commit */
231};
232
233struct sit_info {
234 const struct segment_allocation *s_ops;
235
236 block_t sit_base_addr; /* start block address of SIT area */
237 block_t sit_blocks; /* # of blocks used by SIT area */
238 block_t written_valid_blocks; /* # of valid blocks in main area */
239 char *bitmap; /* all bitmaps pointer */
240 char *sit_bitmap; /* SIT bitmap pointer */
241#ifdef CONFIG_F2FS_CHECK_FS
242 char *sit_bitmap_mir; /* SIT bitmap mirror */
243
244 /* bitmap of segments to be ignored by GC in case of errors */
245 unsigned long *invalid_segmap;
246#endif
247 unsigned int bitmap_size; /* SIT bitmap size */
248
249	unsigned long *tmp_map;			/* bitmap for temporary use */
250 unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
251 unsigned int dirty_sentries; /* # of dirty sentries */
252 unsigned int sents_per_block; /* # of SIT entries per block */
253 struct rw_semaphore sentry_lock; /* to protect SIT cache */
254 struct seg_entry *sentries; /* SIT segment-level cache */
255 struct sec_entry *sec_entries; /* SIT section-level cache */
256
257 /* for cost-benefit algorithm in cleaning procedure */
258 unsigned long long elapsed_time; /* elapsed time after mount */
259 unsigned long long mounted_time; /* mount time */
260 unsigned long long min_mtime; /* min. modification time */
261 unsigned long long max_mtime; /* max. modification time */
262	unsigned long long dirty_min_mtime;	/* min. mtime of dirty candidates in GC_AT */
263	unsigned long long dirty_max_mtime;	/* max. mtime of dirty candidates in GC_AT */
264
265 unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
266};
267
268struct free_segmap_info {
269 unsigned int start_segno; /* start segment number logically */
270 unsigned int free_segments; /* # of free segments */
271 unsigned int free_sections; /* # of free sections */
272 spinlock_t segmap_lock; /* free segmap lock */
273 unsigned long *free_segmap; /* free segment bitmap */
274 unsigned long *free_secmap; /* free section bitmap */
275};
276
277/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
278enum dirty_type {
279 DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
280 DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
281 DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
282 DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
283 DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
284 DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
285 DIRTY, /* to count # of dirty segments */
286 PRE, /* to count # of entirely obsolete segments */
287 NR_DIRTY_TYPE
288};
289
290struct dirty_seglist_info {
291	const struct victim_selection *v_ops;	/* victim selection operation */
292 unsigned long *dirty_segmap[NR_DIRTY_TYPE];
293 unsigned long *dirty_secmap;
294 struct mutex seglist_lock; /* lock for segment bitmaps */
295 int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
296 unsigned long *victim_secmap; /* background GC victims */
297};
298
299/* victim selection function for cleaning and SSR */
300struct victim_selection {
301 int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
302 int, int, char, unsigned long long);
303};
304
305/* for active log information */
306struct curseg_info {
307 struct mutex curseg_mutex; /* lock for consistency */
308 struct f2fs_summary_block *sum_blk; /* cached summary block */
309 struct rw_semaphore journal_rwsem; /* protect journal area */
310 struct f2fs_journal *journal; /* cached journal info */
311 unsigned char alloc_type; /* current allocation type */
312 unsigned short seg_type; /* segment type like CURSEG_XXX_TYPE */
313 unsigned int segno; /* current segment number */
314 unsigned short next_blkoff; /* next block offset to write */
315 unsigned int zone; /* current zone number */
316 unsigned int next_segno; /* preallocated segment */
317 bool inited; /* indicate inmem log is inited */
318};
319
320struct sit_entry_set {
321 struct list_head set_list; /* link with all sit sets */
322 unsigned int start_segno; /* start segno of sits in set */
323 unsigned int entry_cnt; /* the # of sit entries in set */
324};
325
326/*
327 * inline functions
328 */
329static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
330{
331 return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
332}
333
334static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
335 unsigned int segno)
336{
337 struct sit_info *sit_i = SIT_I(sbi);
338 return &sit_i->sentries[segno];
339}
340
341static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
342 unsigned int segno)
343{
344 struct sit_info *sit_i = SIT_I(sbi);
345 return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
346}
347
348static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
349 unsigned int segno, bool use_section)
350{
351 /*
352 * In order to get # of valid blocks in a section instantly from many
353 * segments, f2fs manages two counting structures separately.
354 */
355 if (use_section && __is_large_section(sbi))
356 return get_sec_entry(sbi, segno)->valid_blocks;
357 else
358 return get_seg_entry(sbi, segno)->valid_blocks;
359}
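/*
 * Illustrative usage (not from the original source): callers pass
 * use_section = true when they need section-granular utilization, e.g. to
 * decide whether a whole section has become reclaimable.
 *
 *	if (get_valid_blocks(sbi, segno, true) == 0)
 *		;	// every block in the section is invalid
 */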
360
361static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
362 unsigned int segno, bool use_section)
363{
364 if (use_section && __is_large_section(sbi)) {
365 unsigned int start_segno = START_SEGNO(segno);
366 unsigned int blocks = 0;
367 int i;
368
369 for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
370 struct seg_entry *se = get_seg_entry(sbi, start_segno);
371
372 blocks += se->ckpt_valid_blocks;
373 }
374 return blocks;
375 }
376 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
377}
378
379static inline void seg_info_from_raw_sit(struct seg_entry *se,
380 struct f2fs_sit_entry *rs)
381{
382 se->valid_blocks = GET_SIT_VBLOCKS(rs);
383 se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
384 memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
385 memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
386#ifdef CONFIG_F2FS_CHECK_FS
387 memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
388#endif
389 se->type = GET_SIT_TYPE(rs);
390 se->mtime = le64_to_cpu(rs->mtime);
391}
392
393static inline void __seg_info_to_raw_sit(struct seg_entry *se,
394 struct f2fs_sit_entry *rs)
395{
396 unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
397 se->valid_blocks;
398 rs->vblocks = cpu_to_le16(raw_vblocks);
399 memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
400 rs->mtime = cpu_to_le64(se->mtime);
401}
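/*
 * Worked example of the on-disk packing above (illustrative): vblocks keeps
 * the segment type in its top 6 bits and the valid block count in the low
 * SIT_VBLOCKS_SHIFT (10) bits. For a cold data segment (type 2) with 300
 * valid blocks:
 *
 *	raw_vblocks = (2 << 10) | 300;	// 0x092c before cpu_to_le16()
 *	GET_SIT_VBLOCKS(rs);		// 300
 *	GET_SIT_TYPE(rs);		// 2
 */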
402
403static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
404 struct page *page, unsigned int start)
405{
406 struct f2fs_sit_block *raw_sit;
407 struct seg_entry *se;
408 struct f2fs_sit_entry *rs;
409 unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
410 (unsigned long)MAIN_SEGS(sbi));
411 int i;
412
413 raw_sit = (struct f2fs_sit_block *)page_address(page);
414 memset(raw_sit, 0, PAGE_SIZE);
415 for (i = 0; i < end - start; i++) {
416 rs = &raw_sit->entries[i];
417 se = get_seg_entry(sbi, start + i);
418 __seg_info_to_raw_sit(se, rs);
419 }
420}
421
422static inline void seg_info_to_raw_sit(struct seg_entry *se,
423 struct f2fs_sit_entry *rs)
424{
425 __seg_info_to_raw_sit(se, rs);
426
427 memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
428 se->ckpt_valid_blocks = se->valid_blocks;
429}
430
431static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
432 unsigned int max, unsigned int segno)
433{
434 unsigned int ret;
435 spin_lock(&free_i->segmap_lock);
436 ret = find_next_bit(free_i->free_segmap, max, segno);
437 spin_unlock(&free_i->segmap_lock);
438 return ret;
439}
440
441static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
442{
443 struct free_segmap_info *free_i = FREE_I(sbi);
444 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
445 unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
446 unsigned int next;
447 unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
448
449 spin_lock(&free_i->segmap_lock);
450 clear_bit(segno, free_i->free_segmap);
451 free_i->free_segments++;
452
453 next = find_next_bit(free_i->free_segmap,
454 start_segno + sbi->segs_per_sec, start_segno);
455 if (next >= start_segno + usable_segs) {
456 clear_bit(secno, free_i->free_secmap);
457 free_i->free_sections++;
458 }
459 spin_unlock(&free_i->segmap_lock);
460}
461
462static inline void __set_inuse(struct f2fs_sb_info *sbi,
463 unsigned int segno)
464{
465 struct free_segmap_info *free_i = FREE_I(sbi);
466 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
467
468 set_bit(segno, free_i->free_segmap);
469 free_i->free_segments--;
470 if (!test_and_set_bit(secno, free_i->free_secmap))
471 free_i->free_sections--;
472}
473
474static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
475 unsigned int segno, bool inmem)
476{
477 struct free_segmap_info *free_i = FREE_I(sbi);
478 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
479 unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
480 unsigned int next;
481 unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
482
483 spin_lock(&free_i->segmap_lock);
484 if (test_and_clear_bit(segno, free_i->free_segmap)) {
485 free_i->free_segments++;
486
487 if (!inmem && IS_CURSEC(sbi, secno))
488 goto skip_free;
489 next = find_next_bit(free_i->free_segmap,
490 start_segno + sbi->segs_per_sec, start_segno);
491 if (next >= start_segno + usable_segs) {
492 if (test_and_clear_bit(secno, free_i->free_secmap))
493 free_i->free_sections++;
494 }
495 }
496skip_free:
497 spin_unlock(&free_i->segmap_lock);
498}
499
500static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
501 unsigned int segno)
502{
503 struct free_segmap_info *free_i = FREE_I(sbi);
504 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
505
506 spin_lock(&free_i->segmap_lock);
507 if (!test_and_set_bit(segno, free_i->free_segmap)) {
508 free_i->free_segments--;
509 if (!test_and_set_bit(secno, free_i->free_secmap))
510 free_i->free_sections--;
511 }
512 spin_unlock(&free_i->segmap_lock);
513}
514
515static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
516 void *dst_addr)
517{
518 struct sit_info *sit_i = SIT_I(sbi);
519
520#ifdef CONFIG_F2FS_CHECK_FS
521 if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
522 sit_i->bitmap_size))
523 f2fs_bug_on(sbi, 1);
524#endif
525 memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
526}
527
528static inline block_t written_block_count(struct f2fs_sb_info *sbi)
529{
530 return SIT_I(sbi)->written_valid_blocks;
531}
532
533static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
534{
535 return FREE_I(sbi)->free_segments;
536}
537
538static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
539{
540 return SM_I(sbi)->reserved_segments;
541}
542
543static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
544{
545 return FREE_I(sbi)->free_sections;
546}
547
548static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
549{
550 return DIRTY_I(sbi)->nr_dirty[PRE];
551}
552
553static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
554{
555 return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
556 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
557 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
558 DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
559 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
560 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
561}
562
563static inline int overprovision_segments(struct f2fs_sb_info *sbi)
564{
565 return SM_I(sbi)->ovp_segments;
566}
567
568static inline int reserved_sections(struct f2fs_sb_info *sbi)
569{
570 return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
571}
572
573static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
574{
575	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
576					get_pages(sbi, F2FS_DIRTY_IMETA);
577	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
578 unsigned int segno, left_blocks;
579 int i;
580
581 /* check current node segment */
582 for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
583 segno = CURSEG_I(sbi, i)->segno;
584 left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
585 get_seg_entry(sbi, segno)->ckpt_valid_blocks;
586
587 if (node_blocks > left_blocks)
588 return false;
589 }
590
591 /* check current data segment */
592 segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
593 left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
594 get_seg_entry(sbi, segno)->ckpt_valid_blocks;
595 if (dent_blocks > left_blocks)
596 return false;
597 return true;
598}
599
600static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
601 int freed, int needed)
602{
603 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
604 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
605 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
606
607 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
608 return false;
609
610 if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
611 has_curseg_enough_space(sbi))
612 return false;
613 return (free_sections(sbi) + freed) <=
614 (node_secs + 2 * dent_secs + imeta_secs +
615 reserved_sections(sbi) + needed);
616}
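/*
 * Worked example (illustrative numbers): with dirty pages amounting to 3 node
 * sections, 1 dentry section and 0 imeta sections, reserved_sections() == 6
 * and needed == 0, the threshold is 3 + 2 * 1 + 0 + 6 = 11, so the function
 * reports "not enough" whenever free_sections(sbi) + freed <= 11.
 */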
617
618static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
619{
620 if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
621 return true;
622 if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
623 return true;
624 return false;
625}
626
627static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
628{
629 return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
630}
631
632static inline int utilization(struct f2fs_sb_info *sbi)
633{
634 return div_u64((u64)valid_user_blocks(sbi) * 100,
635 sbi->user_block_count);
636}
637
638/*
639 * Sometimes it is better for f2fs to drop its out-of-place update policy
640 * and do in-place updates (IPU); users can control the policy through sysfs
641 * entries. The policies and their triggering conditions are as follows.
642 * F2FS_IPU_FORCE - all the time,
643 * F2FS_IPU_SSR - if SSR mode is activated,
644 * F2FS_IPU_UTIL - if FS utilization is over threshold,
645 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
646 * threshold,
647 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
648 * storages. IPU will be triggered only if the # of dirty
649 * pages exceeds min_fsync_blocks. (=default option)
650 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
651 * F2FS_IPU_NOCACHE - disable IPU bio cache.
652 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
653 */
654#define DEF_MIN_IPU_UTIL 70
655#define DEF_MIN_FSYNC_BLOCKS 8
656#define DEF_MIN_HOT_BLOCKS 16
657
658#define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */
659
660enum {
661 F2FS_IPU_FORCE,
662 F2FS_IPU_SSR,
663 F2FS_IPU_UTIL,
664 F2FS_IPU_SSR_UTIL,
665 F2FS_IPU_FSYNC,
666 F2FS_IPU_ASYNC,
667 F2FS_IPU_NOCACHE,
668};
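/*
 * Sketch of how the sysfs policy value is interpreted (illustrative; the
 * in-kernel decision is made in the data write path): each enum value above
 * is a bit index within SM_I(sbi)->ipu_policy.
 *
 *	if (SM_I(sbi)->ipu_policy & BIT(F2FS_IPU_UTIL) &&
 *			utilization(sbi) > SM_I(sbi)->min_ipu_util)
 *		;	// prefer an in-place update for this write
 */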
669
670static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
671 int type)
672{
673 struct curseg_info *curseg = CURSEG_I(sbi, type);
674 return curseg->segno;
675}
676
677static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
678 int type)
679{
680 struct curseg_info *curseg = CURSEG_I(sbi, type);
681 return curseg->alloc_type;
682}
683
684static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
685{
686 struct curseg_info *curseg = CURSEG_I(sbi, type);
687 return curseg->next_blkoff;
688}
689
690static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
691{
692 f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
693}
694
695static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
696{
697 struct f2fs_sb_info *sbi = fio->sbi;
698
699 if (__is_valid_data_blkaddr(fio->old_blkaddr))
700 verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
701 META_GENERIC : DATA_GENERIC);
702 verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
703 META_GENERIC : DATA_GENERIC_ENHANCE);
704}
705
706/*
707 * Summary block is always treated as an invalid block
708 */
709static inline int check_block_count(struct f2fs_sb_info *sbi,
710 int segno, struct f2fs_sit_entry *raw_sit)
711{
712 bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
713 int valid_blocks = 0;
714 int cur_pos = 0, next_pos;
715 unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);
716
717 /* check bitmap with valid block count */
718 do {
719 if (is_valid) {
720 next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
721 usable_blks_per_seg,
722 cur_pos);
723 valid_blocks += next_pos - cur_pos;
724 } else
725 next_pos = find_next_bit_le(&raw_sit->valid_map,
726 usable_blks_per_seg,
727 cur_pos);
728 cur_pos = next_pos;
729 is_valid = !is_valid;
730 } while (cur_pos < usable_blks_per_seg);
731
732 if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
733 f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
734 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
735 set_sbi_flag(sbi, SBI_NEED_FSCK);
736 return -EFSCORRUPTED;
737 }
738
739 if (usable_blks_per_seg < sbi->blocks_per_seg)
740 f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
741 sbi->blocks_per_seg,
742 usable_blks_per_seg) != sbi->blocks_per_seg);
743
744 /* check segment usage, and check boundary of a given segment number */
745 if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
746 || segno > TOTAL_SEGS(sbi) - 1)) {
747 f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
748 GET_SIT_VBLOCKS(raw_sit), segno);
749 set_sbi_flag(sbi, SBI_NEED_FSCK);
750 return -EFSCORRUPTED;
751 }
752 return 0;
753}
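/*
 * Worked example (illustrative): if the first ten bits of valid_map are
 * 1110011000, the alternating walk above accumulates runs of 3 and 2 set
 * bits, ends with valid_blocks == 5, and that count must match
 * GET_SIT_VBLOCKS(raw_sit) or the entry is flagged as corrupted.
 */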
754
755static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
756 unsigned int start)
757{
758 struct sit_info *sit_i = SIT_I(sbi);
759 unsigned int offset = SIT_BLOCK_OFFSET(start);
760 block_t blk_addr = sit_i->sit_base_addr + offset;
761
762 check_seg_range(sbi, start);
763
764#ifdef CONFIG_F2FS_CHECK_FS
765 if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
766 f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
767 f2fs_bug_on(sbi, 1);
768#endif
769
770 /* calculate sit block address */
771 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
772 blk_addr += sit_i->sit_blocks;
773
774 return blk_addr;
775}
776
777static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
778 pgoff_t block_addr)
779{
780 struct sit_info *sit_i = SIT_I(sbi);
781 block_addr -= sit_i->sit_base_addr;
782 if (block_addr < sit_i->sit_blocks)
783 block_addr += sit_i->sit_blocks;
784 else
785 block_addr -= sit_i->sit_blocks;
786
787 return block_addr + sit_i->sit_base_addr;
788}
789
790static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
791{
792 unsigned int block_off = SIT_BLOCK_OFFSET(start);
793
794 f2fs_change_bit(block_off, sit_i->sit_bitmap);
795#ifdef CONFIG_F2FS_CHECK_FS
796 f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
797#endif
798}
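/*
 * Illustrative flush flow (not from the original source): the SIT area holds
 * two copies of every SIT block and sit_bitmap records which copy is live.
 * Writing back a dirty SIT block therefore targets the other copy, so a
 * crash mid-write never damages the live copy:
 *
 *	blk = current_sit_addr(sbi, segno);	// live copy
 *	// ... write the updated block to next_sit_addr(sbi, blk) ...
 *	set_to_next_sit(sit_i, segno);		// make the new copy live
 */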
799
800static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
801 bool base_time)
802{
803 struct sit_info *sit_i = SIT_I(sbi);
804 time64_t diff, now = ktime_get_boottime_seconds();
805
806 if (now >= sit_i->mounted_time)
807 return sit_i->elapsed_time + now - sit_i->mounted_time;
808
809 /* system time is set to the past */
810 if (!base_time) {
811 diff = sit_i->mounted_time - now;
812 if (sit_i->elapsed_time >= diff)
813 return sit_i->elapsed_time - diff;
814 return 0;
815 }
816 return sit_i->elapsed_time;
817}
818
819static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
820 unsigned int ofs_in_node, unsigned char version)
821{
822 sum->nid = cpu_to_le32(nid);
823 sum->ofs_in_node = cpu_to_le16(ofs_in_node);
824 sum->version = version;
825}
826
827static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
828{
829 return __start_cp_addr(sbi) +
830 le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
831}
832
833static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
834{
835 return __start_cp_addr(sbi) +
836 le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
837 - (base + 1) + type;
838}
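/*
 * Worked example (illustrative numbers): with
 * cp_pack_total_block_count == 8, sum_blk_addr(sbi, 3, 1) evaluates to
 * __start_cp_addr(sbi) + 8 - 4 + 1, i.e. block index 5 within the
 * checkpoint pack (counting from 0).
 */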
839
840static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
841{
842 if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
843 return true;
844 return false;
845}
846
847/*
848 * It is very important to gather dirty pages and write at once, so that we can
849 * submit a big bio without interfering with other data writes.
850 * By default, 512 pages for directory data,
851 * 512 pages (2MB) * 8 for nodes, and
852 * 256 pages * 8 for meta are set.
853 */
854static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
855{
856 if (sbi->sb->s_bdi->wb.dirty_exceeded)
857 return 0;
858
859 if (type == DATA)
860 return sbi->blocks_per_seg;
861 else if (type == NODE)
862 return 8 * sbi->blocks_per_seg;
863 else if (type == META)
864 return 8 * BIO_MAX_VECS;
865 else
866 return 0;
867}
868
869/*
870 * When writing pages, it is better to align nr_to_write to the segment size.
871 */
872static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
873 struct writeback_control *wbc)
874{
875 long nr_to_write, desired;
876
877 if (wbc->sync_mode != WB_SYNC_NONE)
878 return 0;
879
880 nr_to_write = wbc->nr_to_write;
881 desired = BIO_MAX_VECS;
882 if (type == NODE)
883 desired <<= 1;
884
885 wbc->nr_to_write = desired;
886 return desired - nr_to_write;
887}
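/*
 * Worked example (illustrative): for NODE writeback with BIO_MAX_VECS == 256,
 * desired becomes 512 pages; if the caller came in with nr_to_write == 100,
 * wbc->nr_to_write is raised to 512 and the 412-page difference is returned
 * so the caller can subtract it back from the budget afterwards.
 */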
888
889static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
890{
891 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
892 bool wakeup = false;
893 int i;
894
895 if (force)
896 goto wake_up;
897
898 mutex_lock(&dcc->cmd_lock);
899 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
900 if (i + 1 < dcc->discard_granularity)
901 break;
902 if (!list_empty(&dcc->pend_list[i])) {
903 wakeup = true;
904 break;
905 }
906 }
907 mutex_unlock(&dcc->cmd_lock);
908 if (!wakeup || !is_idle(sbi, DISCARD_TIME))
909 return;
910wake_up:
911 dcc->discard_wake = 1;
912 wake_up_interruptible_all(&dcc->discard_wait_queue);
913}