1/*
2 * fs/f2fs/segment.h
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/blkdev.h>
12#include <linux/backing-dev.h>
13
14/* constant macro */
15#define NULL_SEGNO ((unsigned int)(~0))
16#define NULL_SECNO ((unsigned int)(~0))
17
18#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
19
20/* L: Logical segment # in volume, R: Relative segment # in main area */
21#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
22#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
23
24#define IS_DATASEG(t) (t <= CURSEG_COLD_DATA)
25#define IS_NODESEG(t) (t >= CURSEG_HOT_NODE)
26
27#define IS_CURSEG(sbi, seg) \
28 ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
29 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
30 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
31 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
32 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
33 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
34
35#define IS_CURSEC(sbi, secno) \
36 ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
37 sbi->segs_per_sec) || \
38 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
39 sbi->segs_per_sec) || \
40 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
41 sbi->segs_per_sec) || \
42 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
43 sbi->segs_per_sec) || \
44 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
45 sbi->segs_per_sec) || \
46 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
 47 sbi->segs_per_sec))
48
49#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
50#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
51
52#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
53#define MAIN_SECS(sbi) (sbi->total_sections)
54
55#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
56#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
57
58#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
59#define SEGMENT_SIZE(sbi) (1ULL << (sbi->log_blocksize + \
60 sbi->log_blocks_per_seg))
61
62#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
63 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
64
65#define NEXT_FREE_BLKADDR(sbi, curseg) \
66 (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
67
68#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
69#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
70 (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
71#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
72 (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
73
74#define GET_SEGNO(sbi, blk_addr) \
75 (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
76 NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
77 GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
78#define GET_SECNO(sbi, segno) \
79 ((segno) / sbi->segs_per_sec)
80#define GET_ZONENO_FROM_SEGNO(sbi, segno) \
81 ((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
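
/*
 * Illustrative sketch, not part of the original header: how a raw block
 * address is decomposed with the helpers above.  Assuming 512 blocks per
 * segment (log_blocks_per_seg == 9), a block that sits 1536 blocks past
 * SEG0_BLKADDR() lands in relative segment 3 at offset 0.  The helper and
 * its name are hypothetical; it only restates the macros for clarity.
 */
static inline void example_locate_block(struct f2fs_sb_info *sbi,
					block_t blk_addr,
					unsigned int *segno,
					unsigned int *blkoff)
{
	/* segment index counted from segment #0 of the volume */
	*segno = GET_SEGNO_FROM_SEG0(sbi, blk_addr);
	/* block offset inside that segment */
	*blkoff = GET_BLKOFF_FROM_SEG0(sbi, blk_addr);
}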
82
83#define GET_SUM_BLOCK(sbi, segno) \
84 ((sbi->sm_info->ssa_blkaddr) + segno)
85
86#define GET_SUM_TYPE(footer) ((footer)->entry_type)
87#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)
88
89#define SIT_ENTRY_OFFSET(sit_i, segno) \
90 (segno % sit_i->sents_per_block)
91#define SIT_BLOCK_OFFSET(segno) \
92 (segno / SIT_ENTRY_PER_BLOCK)
93#define START_SEGNO(segno) \
94 (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
95#define SIT_BLK_CNT(sbi) \
96 ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
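
/*
 * Illustrative note, not part of the original header: assuming
 * SIT_ENTRY_PER_BLOCK == 55 (a 4KB SIT block holding 74-byte entries),
 * segment #123 is described by SIT block 123 / 55 == 2, it is entry
 * 123 % 55 == 13 within that block, and START_SEGNO(123) == 110 is the
 * first segment covered by the same SIT block.
 */
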
97#define f2fs_bitmap_size(nr) \
98 (BITS_TO_LONGS(nr) * sizeof(unsigned long))
99
100#define SECTOR_FROM_BLOCK(blk_addr) \
101 (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
102#define SECTOR_TO_BLOCK(sectors) \
103 (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
104#define MAX_BIO_BLOCKS(sbi) \
105 ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
106
107/*
108 * indicate a block allocation direction: RIGHT and LEFT.
109 * RIGHT means allocating new sections towards the end of volume.
110 * LEFT means the opposite direction.
111 */
112enum {
113 ALLOC_RIGHT = 0,
114 ALLOC_LEFT
115};
116
117/*
118 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
119 * LFS writes data sequentially with cleaning operations.
120 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
121 */
122enum {
123 LFS = 0,
124 SSR
125};
126
127/*
128 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
129 * GC_CB is based on cost-benefit algorithm.
130 * GC_GREEDY is based on greedy algorithm.
131 */
132enum {
133 GC_CB = 0,
134 GC_GREEDY
135};
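
/*
 * Illustrative sketch, not part of the original header: the cost-benefit
 * policy above weighs a candidate's age against its utilization, roughly
 *
 *	score = age * (100 - u) / (100 + u)
 *
 * where u is the percentage of blocks in the victim that are still valid
 * and age is scaled from the segment mtime between the min_mtime and
 * max_mtime kept in sit_info.  The real scoring lives in fs/f2fs/gc.c
 * (get_cb_cost()); the helper below is a hypothetical simplification in
 * which a higher score means a better victim.
 */
static inline unsigned int example_cb_score(unsigned int valid_blocks,
					unsigned int blocks_per_seg,
					unsigned int age)
{
	unsigned int u = valid_blocks * 100 / blocks_per_seg;

	/* old and mostly-invalid segments score highest */
	return age * (100 - u) / (100 + u);
}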
136
137/*
138 * BG_GC means the background cleaning job.
139 * FG_GC means the on-demand cleaning job.
 140 * FORCE_FG_GC means the on-demand cleaning job triggered from the background.
141 */
142enum {
143 BG_GC = 0,
144 FG_GC,
145 FORCE_FG_GC,
146};
147
148/* for a function parameter to select a victim segment */
149struct victim_sel_policy {
150 int alloc_mode; /* LFS or SSR */
151 int gc_mode; /* GC_CB or GC_GREEDY */
152 unsigned long *dirty_segmap; /* dirty segment bitmap */
153 unsigned int max_search; /* maximum # of segments to search */
154 unsigned int offset; /* last scanned bitmap offset */
155 unsigned int ofs_unit; /* bitmap search unit */
156 unsigned int min_cost; /* minimum cost */
157 unsigned int min_segno; /* segment # having min. cost */
158};
159
160struct seg_entry {
161 unsigned short valid_blocks; /* # of valid blocks */
162 unsigned char *cur_valid_map; /* validity bitmap of blocks */
163 /*
 164 * # of valid blocks and the validity bitmap stored in the last
165 * checkpoint pack. This information is used by the SSR mode.
166 */
167 unsigned short ckpt_valid_blocks;
168 unsigned char *ckpt_valid_map;
169 unsigned char *discard_map;
170 unsigned char type; /* segment type like CURSEG_XXX_TYPE */
171 unsigned long long mtime; /* modification time of the segment */
172};
173
174struct sec_entry {
175 unsigned int valid_blocks; /* # of valid blocks in a section */
176};
177
178struct segment_allocation {
179 void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
180};
181
182/*
 183 * this value is set in a page as private data, indicating that
 184 * the page has been written atomically and is on the inmem_pages list.
185 */
186#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
187
188#define IS_ATOMIC_WRITTEN_PAGE(page) \
189 (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
190
191struct inmem_pages {
192 struct list_head list;
193 struct page *page;
194 block_t old_addr; /* for revoking when fail to commit */
195};
196
197struct sit_info {
198 const struct segment_allocation *s_ops;
199
200 block_t sit_base_addr; /* start block address of SIT area */
201 block_t sit_blocks; /* # of blocks used by SIT area */
202 block_t written_valid_blocks; /* # of valid blocks in main area */
203 char *sit_bitmap; /* SIT bitmap pointer */
204 unsigned int bitmap_size; /* SIT bitmap size */
205
 206 unsigned long *tmp_map; /* bitmap for temporary use */
207 unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
208 unsigned int dirty_sentries; /* # of dirty sentries */
209 unsigned int sents_per_block; /* # of SIT entries per block */
210 struct mutex sentry_lock; /* to protect SIT cache */
211 struct seg_entry *sentries; /* SIT segment-level cache */
212 struct sec_entry *sec_entries; /* SIT section-level cache */
213
214 /* for cost-benefit algorithm in cleaning procedure */
215 unsigned long long elapsed_time; /* elapsed time after mount */
216 unsigned long long mounted_time; /* mount time */
217 unsigned long long min_mtime; /* min. modification time */
218 unsigned long long max_mtime; /* max. modification time */
219};
220
221struct free_segmap_info {
222 unsigned int start_segno; /* start segment number logically */
223 unsigned int free_segments; /* # of free segments */
224 unsigned int free_sections; /* # of free sections */
225 spinlock_t segmap_lock; /* free segmap lock */
226 unsigned long *free_segmap; /* free segment bitmap */
227 unsigned long *free_secmap; /* free section bitmap */
228};
229
230/* Notice: The order of dirty types is the same as CURSEG_XXX in f2fs.h */
231enum dirty_type {
232 DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
233 DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
234 DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
235 DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
236 DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
237 DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
238 DIRTY, /* to count # of dirty segments */
239 PRE, /* to count # of entirely obsolete segments */
240 NR_DIRTY_TYPE
241};
242
243struct dirty_seglist_info {
 244 const struct victim_selection *v_ops; /* victim selection operation */
245 unsigned long *dirty_segmap[NR_DIRTY_TYPE];
246 struct mutex seglist_lock; /* lock for segment bitmaps */
247 int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
248 unsigned long *victim_secmap; /* background GC victims */
249};
250
251/* victim selection function for cleaning and SSR */
252struct victim_selection {
253 int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
254 int, int, char);
255};
256
257/* for active log information */
258struct curseg_info {
259 struct mutex curseg_mutex; /* lock for consistency */
260 struct f2fs_summary_block *sum_blk; /* cached summary block */
261 struct rw_semaphore journal_rwsem; /* protect journal area */
262 struct f2fs_journal *journal; /* cached journal info */
263 unsigned char alloc_type; /* current allocation type */
264 unsigned int segno; /* current segment number */
265 unsigned short next_blkoff; /* next block offset to write */
266 unsigned int zone; /* current zone number */
267 unsigned int next_segno; /* preallocated segment */
268};
269
270struct sit_entry_set {
271 struct list_head set_list; /* link with all sit sets */
272 unsigned int start_segno; /* start segno of sits in set */
273 unsigned int entry_cnt; /* the # of sit entries in set */
274};
275
276/*
277 * inline functions
278 */
279static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
280{
281 return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
282}
283
284static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
285 unsigned int segno)
286{
287 struct sit_info *sit_i = SIT_I(sbi);
288 return &sit_i->sentries[segno];
289}
290
291static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
292 unsigned int segno)
293{
294 struct sit_info *sit_i = SIT_I(sbi);
295 return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
296}
297
298static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
299 unsigned int segno, int section)
300{
301 /*
302 * In order to get # of valid blocks in a section instantly from many
303 * segments, f2fs manages two counting structures separately.
304 */
305 if (section > 1)
306 return get_sec_entry(sbi, segno)->valid_blocks;
307 else
308 return get_seg_entry(sbi, segno)->valid_blocks;
309}
310
311static inline void seg_info_from_raw_sit(struct seg_entry *se,
312 struct f2fs_sit_entry *rs)
313{
314 se->valid_blocks = GET_SIT_VBLOCKS(rs);
315 se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
316 memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
317 memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
318 se->type = GET_SIT_TYPE(rs);
319 se->mtime = le64_to_cpu(rs->mtime);
320}
321
322static inline void seg_info_to_raw_sit(struct seg_entry *se,
323 struct f2fs_sit_entry *rs)
324{
325 unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
326 se->valid_blocks;
327 rs->vblocks = cpu_to_le16(raw_vblocks);
328 memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
329 memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
330 se->ckpt_valid_blocks = se->valid_blocks;
331 rs->mtime = cpu_to_le64(se->mtime);
332}
333
334static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
335 unsigned int max, unsigned int segno)
336{
337 unsigned int ret;
338 spin_lock(&free_i->segmap_lock);
339 ret = find_next_bit(free_i->free_segmap, max, segno);
340 spin_unlock(&free_i->segmap_lock);
341 return ret;
342}
343
344static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
345{
346 struct free_segmap_info *free_i = FREE_I(sbi);
347 unsigned int secno = segno / sbi->segs_per_sec;
348 unsigned int start_segno = secno * sbi->segs_per_sec;
349 unsigned int next;
350
351 spin_lock(&free_i->segmap_lock);
352 clear_bit(segno, free_i->free_segmap);
353 free_i->free_segments++;
354
355 next = find_next_bit(free_i->free_segmap,
356 start_segno + sbi->segs_per_sec, start_segno);
357 if (next >= start_segno + sbi->segs_per_sec) {
358 clear_bit(secno, free_i->free_secmap);
359 free_i->free_sections++;
360 }
361 spin_unlock(&free_i->segmap_lock);
362}
363
364static inline void __set_inuse(struct f2fs_sb_info *sbi,
365 unsigned int segno)
366{
367 struct free_segmap_info *free_i = FREE_I(sbi);
368 unsigned int secno = segno / sbi->segs_per_sec;
369 set_bit(segno, free_i->free_segmap);
370 free_i->free_segments--;
371 if (!test_and_set_bit(secno, free_i->free_secmap))
372 free_i->free_sections--;
373}
374
375static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
376 unsigned int segno)
377{
378 struct free_segmap_info *free_i = FREE_I(sbi);
379 unsigned int secno = segno / sbi->segs_per_sec;
380 unsigned int start_segno = secno * sbi->segs_per_sec;
381 unsigned int next;
382
383 spin_lock(&free_i->segmap_lock);
384 if (test_and_clear_bit(segno, free_i->free_segmap)) {
385 free_i->free_segments++;
386
387 next = find_next_bit(free_i->free_segmap,
388 start_segno + sbi->segs_per_sec, start_segno);
389 if (next >= start_segno + sbi->segs_per_sec) {
390 if (test_and_clear_bit(secno, free_i->free_secmap))
391 free_i->free_sections++;
392 }
393 }
394 spin_unlock(&free_i->segmap_lock);
395}
396
397static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
398 unsigned int segno)
399{
400 struct free_segmap_info *free_i = FREE_I(sbi);
401 unsigned int secno = segno / sbi->segs_per_sec;
402 spin_lock(&free_i->segmap_lock);
403 if (!test_and_set_bit(segno, free_i->free_segmap)) {
404 free_i->free_segments--;
405 if (!test_and_set_bit(secno, free_i->free_secmap))
406 free_i->free_sections--;
407 }
408 spin_unlock(&free_i->segmap_lock);
409}
410
411static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
412 void *dst_addr)
413{
414 struct sit_info *sit_i = SIT_I(sbi);
415 memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
416}
417
418static inline block_t written_block_count(struct f2fs_sb_info *sbi)
419{
420 return SIT_I(sbi)->written_valid_blocks;
421}
422
423static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
424{
425 return FREE_I(sbi)->free_segments;
426}
427
428static inline int reserved_segments(struct f2fs_sb_info *sbi)
429{
430 return SM_I(sbi)->reserved_segments;
431}
432
433static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
434{
435 return FREE_I(sbi)->free_sections;
436}
437
438static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
439{
440 return DIRTY_I(sbi)->nr_dirty[PRE];
441}
442
443static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
444{
445 return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
446 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
447 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
448 DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
449 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
450 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
451}
452
453static inline int overprovision_segments(struct f2fs_sb_info *sbi)
454{
455 return SM_I(sbi)->ovp_segments;
456}
457
458static inline int overprovision_sections(struct f2fs_sb_info *sbi)
459{
460 return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
461}
462
463static inline int reserved_sections(struct f2fs_sb_info *sbi)
464{
465 return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
466}
467
468static inline bool need_SSR(struct f2fs_sb_info *sbi)
469{
470 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
471 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
472 return free_sections(sbi) <= (node_secs + 2 * dent_secs +
473 reserved_sections(sbi) + 1);
474}
475
476static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
477{
478 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
479 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
480
481 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
482 return false;
483
484 return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
485 reserved_sections(sbi));
486}
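
/*
 * Illustrative note, not part of the original header, with hypothetical
 * numbers: if dirty node pages span 5 sections, dirty dentry pages span
 * 2 sections and 6 sections are reserved, the check above reports
 * pressure while free_sections(sbi) plus the just-freed count is at most
 * 5 + 2 * 2 + 6 = 15, i.e. the cleaner keeps running until more than 15
 * sections are free.
 */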
487
488static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
489{
490 return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
491}
492
493static inline int utilization(struct f2fs_sb_info *sbi)
494{
495 return div_u64((u64)valid_user_blocks(sbi) * 100,
496 sbi->user_block_count);
497}
498
499/*
 500 * Sometimes it is better for f2fs to drop the out-of-place update policy.
 501 * Users can control the policy through sysfs entries.
 502 * The policies and their triggering conditions are as follows.
 503 * F2FS_IPU_FORCE - all the time,
 504 * F2FS_IPU_SSR - if SSR mode is activated,
 505 * F2FS_IPU_UTIL - if FS utilization is over the threshold,
 506 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 507 * the threshold,
 508 * F2FS_IPU_FSYNC - activated in the fsync path only for high performance
 509 * flash storages. IPU will be triggered only if the # of
 510 * dirty pages exceeds min_fsync_blocks.
 511 * F2FS_IPU_DISABLE - disable IPU. (=default option)
512 */
513#define DEF_MIN_IPU_UTIL 70
514#define DEF_MIN_FSYNC_BLOCKS 8
515
516enum {
517 F2FS_IPU_FORCE,
518 F2FS_IPU_SSR,
519 F2FS_IPU_UTIL,
520 F2FS_IPU_SSR_UTIL,
521 F2FS_IPU_FSYNC,
522};
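
/*
 * Illustrative note, not part of the original header: ipu_policy is kept
 * as a bitmask, so several triggers can be armed at once.  For example
 * (0x1 << F2FS_IPU_SSR) | (0x1 << F2FS_IPU_FSYNC) == 0x12 enables IPU
 * both when SSR is active and in the fsync path; need_inplace_update()
 * below simply tests each bit in turn.
 */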
523
524static inline bool need_inplace_update(struct inode *inode)
525{
526 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
527 unsigned int policy = SM_I(sbi)->ipu_policy;
528
529 /* IPU can be done only for the user data */
530 if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
531 return false;
532
533 if (policy & (0x1 << F2FS_IPU_FORCE))
534 return true;
535 if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
536 return true;
537 if (policy & (0x1 << F2FS_IPU_UTIL) &&
538 utilization(sbi) > SM_I(sbi)->min_ipu_util)
539 return true;
540 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
541 utilization(sbi) > SM_I(sbi)->min_ipu_util)
542 return true;
543
544 /* this is only set during fdatasync */
545 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
546 is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
547 return true;
548
549 return false;
550}
551
552static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
553 int type)
554{
555 struct curseg_info *curseg = CURSEG_I(sbi, type);
556 return curseg->segno;
557}
558
559static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
560 int type)
561{
562 struct curseg_info *curseg = CURSEG_I(sbi, type);
563 return curseg->alloc_type;
564}
565
566static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
567{
568 struct curseg_info *curseg = CURSEG_I(sbi, type);
569 return curseg->next_blkoff;
570}
571
572static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
573{
574 f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
575}
576
577static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
578{
579 f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
580 || blk_addr >= MAX_BLKADDR(sbi));
581}
582
583/*
584 * Summary block is always treated as an invalid block
585 */
586static inline void check_block_count(struct f2fs_sb_info *sbi,
587 int segno, struct f2fs_sit_entry *raw_sit)
588{
589#ifdef CONFIG_F2FS_CHECK_FS
590 bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
591 int valid_blocks = 0;
592 int cur_pos = 0, next_pos;
593
594 /* check bitmap with valid block count */
595 do {
596 if (is_valid) {
597 next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
598 sbi->blocks_per_seg,
599 cur_pos);
600 valid_blocks += next_pos - cur_pos;
601 } else
602 next_pos = find_next_bit_le(&raw_sit->valid_map,
603 sbi->blocks_per_seg,
604 cur_pos);
605 cur_pos = next_pos;
606 is_valid = !is_valid;
607 } while (cur_pos < sbi->blocks_per_seg);
608 BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
609#endif
610 /* check segment usage, and check boundary of a given segment number */
611 f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
612 || segno > TOTAL_SEGS(sbi) - 1);
613}
614
615static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
616 unsigned int start)
617{
618 struct sit_info *sit_i = SIT_I(sbi);
619 unsigned int offset = SIT_BLOCK_OFFSET(start);
620 block_t blk_addr = sit_i->sit_base_addr + offset;
621
622 check_seg_range(sbi, start);
623
624 /* calculate sit block address */
625 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
626 blk_addr += sit_i->sit_blocks;
627
628 return blk_addr;
629}
630
631static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
632 pgoff_t block_addr)
633{
634 struct sit_info *sit_i = SIT_I(sbi);
635 block_addr -= sit_i->sit_base_addr;
636 if (block_addr < sit_i->sit_blocks)
637 block_addr += sit_i->sit_blocks;
638 else
639 block_addr -= sit_i->sit_blocks;
640
641 return block_addr + sit_i->sit_base_addr;
642}
643
644static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
645{
646 unsigned int block_off = SIT_BLOCK_OFFSET(start);
647
648 f2fs_change_bit(block_off, sit_i->sit_bitmap);
649}
650
651static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
652{
653 struct sit_info *sit_i = SIT_I(sbi);
654 return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
655 sit_i->mounted_time;
656}
657
658static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
659 unsigned int ofs_in_node, unsigned char version)
660{
661 sum->nid = cpu_to_le32(nid);
662 sum->ofs_in_node = cpu_to_le16(ofs_in_node);
663 sum->version = version;
664}
665
666static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
667{
668 return __start_cp_addr(sbi) +
669 le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
670}
671
672static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
673{
674 return __start_cp_addr(sbi) +
675 le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
676 - (base + 1) + type;
677}
678
679static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
680{
681 if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
682 return true;
683 return false;
684}
685
686static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
687{
688 struct block_device *bdev = sbi->sb->s_bdev;
689 struct request_queue *q = bdev_get_queue(bdev);
690 return SECTOR_TO_BLOCK(queue_max_sectors(q));
691}
692
693/*
 694 * It is very important to gather dirty pages and write them at once, so that
 695 * we can submit a big bio without interfering with other data writes.
696 * By default, 512 pages for directory data,
697 * 512 pages (2MB) * 3 for three types of nodes, and
698 * max_bio_blocks for meta are set.
699 */
700static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
701{
702 if (sbi->sb->s_bdi->wb.dirty_exceeded)
703 return 0;
704
705 if (type == DATA)
706 return sbi->blocks_per_seg;
707 else if (type == NODE)
708 return 3 * sbi->blocks_per_seg;
709 else if (type == META)
710 return MAX_BIO_BLOCKS(sbi);
711 else
712 return 0;
713}
714
715/*
 716 * When writing pages, it is better to align nr_to_write to the segment size.
717 */
718static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
719 struct writeback_control *wbc)
720{
721 long nr_to_write, desired;
722
723 if (wbc->sync_mode != WB_SYNC_NONE)
724 return 0;
725
726 nr_to_write = wbc->nr_to_write;
727
728 if (type == DATA)
729 desired = 4096;
730 else if (type == NODE)
731 desired = 3 * max_hw_blocks(sbi);
732 else
733 desired = MAX_BIO_BLOCKS(sbi);
734
735 wbc->nr_to_write = desired;
736 return desired - nr_to_write;
737}
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * fs/f2fs/segment.h
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/blkdev.h>
9#include <linux/backing-dev.h>
10
11/* constant macro */
12#define NULL_SEGNO ((unsigned int)(~0))
13#define NULL_SECNO ((unsigned int)(~0))
14
15#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
16#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */
17
18#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
19#define F2FS_MIN_META_SEGMENTS 8 /* SB + 2 (CP + SIT + NAT) + SSA */
20
21/* L: Logical segment # in volume, R: Relative segment # in main area */
22#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
23#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)
24
25#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
26#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE)
27#define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA))
28
29static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
30 unsigned short seg_type)
31{
32 f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG);
33}
34
35#define IS_HOT(t) ((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
36#define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
37#define IS_COLD(t) ((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
38
39#define IS_CURSEG(sbi, seg) \
40 (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
41 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
42 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
43 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
44 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
45 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \
46 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \
47 ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno))
48
49#define IS_CURSEC(sbi, secno) \
50 (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
51 (sbi)->segs_per_sec) || \
52 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
53 (sbi)->segs_per_sec) || \
54 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
55 (sbi)->segs_per_sec) || \
56 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
57 (sbi)->segs_per_sec) || \
58 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
59 (sbi)->segs_per_sec) || \
60 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
61 (sbi)->segs_per_sec) || \
62 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
63 (sbi)->segs_per_sec) || \
64 ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
65 (sbi)->segs_per_sec))
66
67#define MAIN_BLKADDR(sbi) \
68 (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
69 le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
70#define SEG0_BLKADDR(sbi) \
71 (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
72 le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
73
74#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
75#define MAIN_SECS(sbi) ((sbi)->total_sections)
76
77#define TOTAL_SEGS(sbi) \
78 (SM_I(sbi) ? SM_I(sbi)->segment_count : \
79 le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
80#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
81
82#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
83#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
84 (sbi)->log_blocks_per_seg))
85
86#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
87 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
88
89#define NEXT_FREE_BLKADDR(sbi, curseg) \
90 (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
91
92#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
93#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
94 (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
95#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
96 (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
97
98#define GET_SEGNO(sbi, blk_addr) \
99 ((!__is_valid_data_blkaddr(blk_addr)) ? \
100 NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
101 GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
102#define BLKS_PER_SEC(sbi) \
103 ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
104#define CAP_BLKS_PER_SEC(sbi) \
105 ((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
106 (sbi)->unusable_blocks_per_sec)
107#define CAP_SEGS_PER_SEC(sbi) \
108 ((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
109 (sbi)->log_blocks_per_seg))
110#define GET_SEC_FROM_SEG(sbi, segno) \
111 (((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
112#define GET_SEG_FROM_SEC(sbi, secno) \
113 ((secno) * (sbi)->segs_per_sec)
114#define GET_ZONE_FROM_SEC(sbi, secno) \
115 (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
116#define GET_ZONE_FROM_SEG(sbi, segno) \
117 GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
118
119#define GET_SUM_BLOCK(sbi, segno) \
120 ((sbi)->sm_info->ssa_blkaddr + (segno))
121
122#define GET_SUM_TYPE(footer) ((footer)->entry_type)
123#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
124
125#define SIT_ENTRY_OFFSET(sit_i, segno) \
126 ((segno) % (sit_i)->sents_per_block)
127#define SIT_BLOCK_OFFSET(segno) \
128 ((segno) / SIT_ENTRY_PER_BLOCK)
129#define START_SEGNO(segno) \
130 (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
131#define SIT_BLK_CNT(sbi) \
132 DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
133#define f2fs_bitmap_size(nr) \
134 (BITS_TO_LONGS(nr) * sizeof(unsigned long))
135
136#define SECTOR_FROM_BLOCK(blk_addr) \
137 (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
138#define SECTOR_TO_BLOCK(sectors) \
139 ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
140
141/*
142 * indicate a block allocation direction: RIGHT and LEFT.
143 * RIGHT means allocating new sections towards the end of volume.
144 * LEFT means the opposite direction.
145 */
146enum {
147 ALLOC_RIGHT = 0,
148 ALLOC_LEFT
149};
150
151/*
152 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
153 * LFS writes data sequentially with cleaning operations.
154 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 155 * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into a
 156 * fragmented segment which has a similar aging degree.
157 */
158enum {
159 LFS = 0,
160 SSR,
161 AT_SSR,
162};
163
164/*
165 * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes.
166 * GC_CB is based on cost-benefit algorithm.
167 * GC_GREEDY is based on greedy algorithm.
168 * GC_AT is based on age-threshold algorithm.
169 */
170enum {
171 GC_CB = 0,
172 GC_GREEDY,
173 GC_AT,
174 ALLOC_NEXT,
175 FLUSH_DEVICE,
176 MAX_GC_POLICY,
177};
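
/*
 * Illustrative note, not part of the original header: with GC_AT, victim
 * selection also takes the candidate's age into account.  The age and
 * age_threshold fields of victim_sel_policy below carry that state, and
 * sit_info tracks the dirty_min_mtime/dirty_max_mtime range used to
 * scale a candidate's mtime into an age.
 */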
178
179/*
180 * BG_GC means the background cleaning job.
181 * FG_GC means the on-demand cleaning job.
182 */
183enum {
184 BG_GC = 0,
185 FG_GC,
186};
187
188/* for a function parameter to select a victim segment */
189struct victim_sel_policy {
190 int alloc_mode; /* LFS or SSR */
191 int gc_mode; /* GC_CB or GC_GREEDY */
192 unsigned long *dirty_bitmap; /* dirty segment/section bitmap */
193 unsigned int max_search; /*
194 * maximum # of segments/sections
195 * to search
196 */
197 unsigned int offset; /* last scanned bitmap offset */
198 unsigned int ofs_unit; /* bitmap search unit */
199 unsigned int min_cost; /* minimum cost */
200 unsigned long long oldest_age; /* oldest age of segments having the same min cost */
201 unsigned int min_segno; /* segment # having min. cost */
 202 unsigned long long age; /* mtime of GCed section */
203 unsigned long long age_threshold;/* age threshold */
204};
205
206struct seg_entry {
207 unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
208 unsigned int valid_blocks:10; /* # of valid blocks */
209 unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
210 unsigned int padding:6; /* padding */
211 unsigned char *cur_valid_map; /* validity bitmap of blocks */
212#ifdef CONFIG_F2FS_CHECK_FS
213 unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */
214#endif
215 /*
216 * # of valid blocks and the validity bitmap stored in the last
217 * checkpoint pack. This information is used by the SSR mode.
218 */
219 unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
220 unsigned char *discard_map;
221 unsigned long long mtime; /* modification time of the segment */
222};
223
224struct sec_entry {
225 unsigned int valid_blocks; /* # of valid blocks in a section */
226};
227
228#define MAX_SKIP_GC_COUNT 16
229
230struct revoke_entry {
231 struct list_head list;
232 block_t old_addr; /* for revoking when fail to commit */
233 pgoff_t index;
234};
235
236struct sit_info {
237 block_t sit_base_addr; /* start block address of SIT area */
238 block_t sit_blocks; /* # of blocks used by SIT area */
239 block_t written_valid_blocks; /* # of valid blocks in main area */
240 char *bitmap; /* all bitmaps pointer */
241 char *sit_bitmap; /* SIT bitmap pointer */
242#ifdef CONFIG_F2FS_CHECK_FS
243 char *sit_bitmap_mir; /* SIT bitmap mirror */
244
245 /* bitmap of segments to be ignored by GC in case of errors */
246 unsigned long *invalid_segmap;
247#endif
248 unsigned int bitmap_size; /* SIT bitmap size */
249
 250 unsigned long *tmp_map; /* bitmap for temporary use */
251 unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
252 unsigned int dirty_sentries; /* # of dirty sentries */
253 unsigned int sents_per_block; /* # of SIT entries per block */
254 struct rw_semaphore sentry_lock; /* to protect SIT cache */
255 struct seg_entry *sentries; /* SIT segment-level cache */
256 struct sec_entry *sec_entries; /* SIT section-level cache */
257
258 /* for cost-benefit algorithm in cleaning procedure */
259 unsigned long long elapsed_time; /* elapsed time after mount */
260 unsigned long long mounted_time; /* mount time */
261 unsigned long long min_mtime; /* min. modification time */
262 unsigned long long max_mtime; /* max. modification time */
 263 unsigned long long dirty_min_mtime; /* min. mtime of candidates in GC_AT */
 264 unsigned long long dirty_max_mtime; /* max. mtime of candidates in GC_AT */
265
266 unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
267};
268
269struct free_segmap_info {
270 unsigned int start_segno; /* start segment number logically */
271 unsigned int free_segments; /* # of free segments */
272 unsigned int free_sections; /* # of free sections */
273 spinlock_t segmap_lock; /* free segmap lock */
274 unsigned long *free_segmap; /* free segment bitmap */
275 unsigned long *free_secmap; /* free section bitmap */
276};
277
278/* Notice: The order of dirty types is the same as CURSEG_XXX in f2fs.h */
279enum dirty_type {
280 DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */
281 DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */
282 DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */
283 DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */
284 DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */
285 DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */
286 DIRTY, /* to count # of dirty segments */
287 PRE, /* to count # of entirely obsolete segments */
288 NR_DIRTY_TYPE
289};
290
291struct dirty_seglist_info {
292 unsigned long *dirty_segmap[NR_DIRTY_TYPE];
293 unsigned long *dirty_secmap;
294 struct mutex seglist_lock; /* lock for segment bitmaps */
295 int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */
296 unsigned long *victim_secmap; /* background GC victims */
297 unsigned long *pinned_secmap; /* pinned victims from foreground GC */
298 unsigned int pinned_secmap_cnt; /* count of victims which has pinned data */
299 bool enable_pin_section; /* enable pinning section */
300};
301
302/* for active log information */
303struct curseg_info {
304 struct mutex curseg_mutex; /* lock for consistency */
305 struct f2fs_summary_block *sum_blk; /* cached summary block */
306 struct rw_semaphore journal_rwsem; /* protect journal area */
307 struct f2fs_journal *journal; /* cached journal info */
308 unsigned char alloc_type; /* current allocation type */
309 unsigned short seg_type; /* segment type like CURSEG_XXX_TYPE */
310 unsigned int segno; /* current segment number */
311 unsigned short next_blkoff; /* next block offset to write */
312 unsigned int zone; /* current zone number */
313 unsigned int next_segno; /* preallocated segment */
314 int fragment_remained_chunk; /* remained block size in a chunk for block fragmentation mode */
315 bool inited; /* indicate inmem log is inited */
316};
317
318struct sit_entry_set {
319 struct list_head set_list; /* link with all sit sets */
320 unsigned int start_segno; /* start segno of sits in set */
321 unsigned int entry_cnt; /* the # of sit entries in set */
322};
323
324/*
325 * inline functions
326 */
327static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
328{
329 return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
330}
331
332static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
333 unsigned int segno)
334{
335 struct sit_info *sit_i = SIT_I(sbi);
336 return &sit_i->sentries[segno];
337}
338
339static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
340 unsigned int segno)
341{
342 struct sit_info *sit_i = SIT_I(sbi);
343 return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
344}
345
346static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
347 unsigned int segno, bool use_section)
348{
349 /*
350 * In order to get # of valid blocks in a section instantly from many
351 * segments, f2fs manages two counting structures separately.
352 */
353 if (use_section && __is_large_section(sbi))
354 return get_sec_entry(sbi, segno)->valid_blocks;
355 else
356 return get_seg_entry(sbi, segno)->valid_blocks;
357}
358
359static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
360 unsigned int segno, bool use_section)
361{
362 if (use_section && __is_large_section(sbi)) {
363 unsigned int start_segno = START_SEGNO(segno);
364 unsigned int blocks = 0;
365 int i;
366
367 for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
368 struct seg_entry *se = get_seg_entry(sbi, start_segno);
369
370 blocks += se->ckpt_valid_blocks;
371 }
372 return blocks;
373 }
374 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
375}
376
377static inline void seg_info_from_raw_sit(struct seg_entry *se,
378 struct f2fs_sit_entry *rs)
379{
380 se->valid_blocks = GET_SIT_VBLOCKS(rs);
381 se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
382 memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
383 memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
384#ifdef CONFIG_F2FS_CHECK_FS
385 memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
386#endif
387 se->type = GET_SIT_TYPE(rs);
388 se->mtime = le64_to_cpu(rs->mtime);
389}
390
391static inline void __seg_info_to_raw_sit(struct seg_entry *se,
392 struct f2fs_sit_entry *rs)
393{
394 unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
395 se->valid_blocks;
396 rs->vblocks = cpu_to_le16(raw_vblocks);
397 memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
398 rs->mtime = cpu_to_le64(se->mtime);
399}
400
401static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
402 struct page *page, unsigned int start)
403{
404 struct f2fs_sit_block *raw_sit;
405 struct seg_entry *se;
406 struct f2fs_sit_entry *rs;
407 unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
408 (unsigned long)MAIN_SEGS(sbi));
409 int i;
410
411 raw_sit = (struct f2fs_sit_block *)page_address(page);
412 memset(raw_sit, 0, PAGE_SIZE);
413 for (i = 0; i < end - start; i++) {
414 rs = &raw_sit->entries[i];
415 se = get_seg_entry(sbi, start + i);
416 __seg_info_to_raw_sit(se, rs);
417 }
418}
419
420static inline void seg_info_to_raw_sit(struct seg_entry *se,
421 struct f2fs_sit_entry *rs)
422{
423 __seg_info_to_raw_sit(se, rs);
424
425 memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
426 se->ckpt_valid_blocks = se->valid_blocks;
427}
428
429static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
430 unsigned int max, unsigned int segno)
431{
432 unsigned int ret;
433 spin_lock(&free_i->segmap_lock);
434 ret = find_next_bit(free_i->free_segmap, max, segno);
435 spin_unlock(&free_i->segmap_lock);
436 return ret;
437}
438
439static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
440{
441 struct free_segmap_info *free_i = FREE_I(sbi);
442 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
443 unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
444 unsigned int next;
445 unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
446
447 spin_lock(&free_i->segmap_lock);
448 clear_bit(segno, free_i->free_segmap);
449 free_i->free_segments++;
450
451 next = find_next_bit(free_i->free_segmap,
452 start_segno + sbi->segs_per_sec, start_segno);
453 if (next >= start_segno + usable_segs) {
454 clear_bit(secno, free_i->free_secmap);
455 free_i->free_sections++;
456 }
457 spin_unlock(&free_i->segmap_lock);
458}
459
460static inline void __set_inuse(struct f2fs_sb_info *sbi,
461 unsigned int segno)
462{
463 struct free_segmap_info *free_i = FREE_I(sbi);
464 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
465
466 set_bit(segno, free_i->free_segmap);
467 free_i->free_segments--;
468 if (!test_and_set_bit(secno, free_i->free_secmap))
469 free_i->free_sections--;
470}
471
472static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
473 unsigned int segno, bool inmem)
474{
475 struct free_segmap_info *free_i = FREE_I(sbi);
476 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
477 unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
478 unsigned int next;
479 unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi, segno);
480
481 spin_lock(&free_i->segmap_lock);
482 if (test_and_clear_bit(segno, free_i->free_segmap)) {
483 free_i->free_segments++;
484
485 if (!inmem && IS_CURSEC(sbi, secno))
486 goto skip_free;
487 next = find_next_bit(free_i->free_segmap,
488 start_segno + sbi->segs_per_sec, start_segno);
489 if (next >= start_segno + usable_segs) {
490 if (test_and_clear_bit(secno, free_i->free_secmap))
491 free_i->free_sections++;
492 }
493 }
494skip_free:
495 spin_unlock(&free_i->segmap_lock);
496}
497
498static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
499 unsigned int segno)
500{
501 struct free_segmap_info *free_i = FREE_I(sbi);
502 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
503
504 spin_lock(&free_i->segmap_lock);
505 if (!test_and_set_bit(segno, free_i->free_segmap)) {
506 free_i->free_segments--;
507 if (!test_and_set_bit(secno, free_i->free_secmap))
508 free_i->free_sections--;
509 }
510 spin_unlock(&free_i->segmap_lock);
511}
512
513static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
514 void *dst_addr)
515{
516 struct sit_info *sit_i = SIT_I(sbi);
517
518#ifdef CONFIG_F2FS_CHECK_FS
519 if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
520 sit_i->bitmap_size))
521 f2fs_bug_on(sbi, 1);
522#endif
523 memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
524}
525
526static inline block_t written_block_count(struct f2fs_sb_info *sbi)
527{
528 return SIT_I(sbi)->written_valid_blocks;
529}
530
531static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
532{
533 return FREE_I(sbi)->free_segments;
534}
535
536static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
537{
538 return SM_I(sbi)->reserved_segments +
539 SM_I(sbi)->additional_reserved_segments;
540}
541
542static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
543{
544 return FREE_I(sbi)->free_sections;
545}
546
547static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
548{
549 return DIRTY_I(sbi)->nr_dirty[PRE];
550}
551
552static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
553{
554 return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
555 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
556 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
557 DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
558 DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
559 DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
560}
561
562static inline int overprovision_segments(struct f2fs_sb_info *sbi)
563{
564 return SM_I(sbi)->ovp_segments;
565}
566
567static inline int reserved_sections(struct f2fs_sb_info *sbi)
568{
569 return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi));
570}
571
572static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
573 unsigned int node_blocks, unsigned int dent_blocks)
574{
575
576 unsigned int segno, left_blocks;
577 int i;
578
579 /* check current node segment */
580 for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
581 segno = CURSEG_I(sbi, i)->segno;
582 left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
583 get_seg_entry(sbi, segno)->ckpt_valid_blocks;
584
585 if (node_blocks > left_blocks)
586 return false;
587 }
588
589 /* check current data segment */
590 segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
591 left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
592 get_seg_entry(sbi, segno)->ckpt_valid_blocks;
593 if (dent_blocks > left_blocks)
594 return false;
595 return true;
596}
597
598/*
599 * calculate needed sections for dirty node/dentry
600 * and call has_curseg_enough_space
601 */
602static inline void __get_secs_required(struct f2fs_sb_info *sbi,
603 unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p)
604{
605 unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
606 get_pages(sbi, F2FS_DIRTY_DENTS) +
607 get_pages(sbi, F2FS_DIRTY_IMETA);
608 unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
609 unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi);
610 unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi);
611 unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi);
612 unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi);
613
614 if (lower_p)
615 *lower_p = node_secs + dent_secs;
616 if (upper_p)
617 *upper_p = node_secs + dent_secs +
618 (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);
619 if (curseg_p)
620 *curseg_p = has_curseg_enough_space(sbi,
621 node_blocks, dent_blocks);
622}
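
/*
 * Illustrative note, not part of the original header, with hypothetical
 * numbers: if CAP_BLKS_PER_SEC() is 512, the dirty node/dentry/imeta
 * pages total 1100 blocks and 300 of those are dentry blocks, then
 * node_secs = 2, dent_secs = 0, node_blocks = 76 and dent_blocks = 300,
 * so *lower_p = 2, *upper_p = 2 + 1 + 1 = 4, and *curseg_p reports
 * whether the leftover 76 node and 300 dentry blocks still fit in the
 * currently open logs.
 */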
623
624static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
625 int freed, int needed)
626{
627 unsigned int free_secs, lower_secs, upper_secs;
628 bool curseg_space;
629
630 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
631 return false;
632
633 __get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space);
634
635 free_secs = free_sections(sbi) + freed;
636 lower_secs += needed + reserved_sections(sbi);
637 upper_secs += needed + reserved_sections(sbi);
638
639 if (free_secs > upper_secs)
640 return false;
641 else if (free_secs <= lower_secs)
642 return true;
643 return !curseg_space;
644}
645
646static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi,
647 int freed, int needed)
648{
649 return !has_not_enough_free_secs(sbi, freed, needed);
650}
651
652static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
653{
654 if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
655 return true;
656 if (likely(has_enough_free_secs(sbi, 0, 0)))
657 return true;
658 return false;
659}
660
661static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
662{
663 return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
664}
665
666static inline int utilization(struct f2fs_sb_info *sbi)
667{
668 return div_u64((u64)valid_user_blocks(sbi) * 100,
669 sbi->user_block_count);
670}
671
672/*
 673 * Sometimes it is better for f2fs to drop the out-of-place update policy.
 674 * Users can control the policy through sysfs entries.
 675 * The policies and their triggering conditions are as follows.
 676 * F2FS_IPU_FORCE - all the time,
 677 * F2FS_IPU_SSR - if SSR mode is activated,
 678 * F2FS_IPU_UTIL - if FS utilization is over the threshold,
 679 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 680 * the threshold,
 681 * F2FS_IPU_FSYNC - activated in the fsync path only for high performance
 682 * flash storages. IPU will be triggered only if the # of
 683 * dirty pages exceeds min_fsync_blocks. (=default option)
684 * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
685 * F2FS_IPU_NOCACHE - disable IPU bio cache.
686 * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
687 * FI_OPU_WRITE flag.
688 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
689 */
690#define DEF_MIN_IPU_UTIL 70
691#define DEF_MIN_FSYNC_BLOCKS 8
692#define DEF_MIN_HOT_BLOCKS 16
693
694#define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */
695
696#define F2FS_IPU_DISABLE 0
697
698/* Modifications to this enum must be kept in sync with the ipu_mode_names array */
699enum {
700 F2FS_IPU_FORCE,
701 F2FS_IPU_SSR,
702 F2FS_IPU_UTIL,
703 F2FS_IPU_SSR_UTIL,
704 F2FS_IPU_FSYNC,
705 F2FS_IPU_ASYNC,
706 F2FS_IPU_NOCACHE,
707 F2FS_IPU_HONOR_OPU_WRITE,
708 F2FS_IPU_MAX,
709};
710
711static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
712{
713 return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
714}
715
716#define F2FS_IPU_POLICY(name) \
717static inline bool IS_##name(struct f2fs_sb_info *sbi) \
718{ \
719 return SM_I(sbi)->ipu_policy & BIT(name); \
720}
721
722F2FS_IPU_POLICY(F2FS_IPU_FORCE);
723F2FS_IPU_POLICY(F2FS_IPU_SSR);
724F2FS_IPU_POLICY(F2FS_IPU_UTIL);
725F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
726F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
727F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
728F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
729F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
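
/*
 * Illustrative usage, not part of the original header: the predicates
 * generated above are what callers are expected to test.  The helper
 * below is hypothetical and only shows how a write path could combine
 * them for the fsync trigger.
 */
static inline bool example_ipu_wanted_for_fsync(struct f2fs_sb_info *sbi)
{
	if (IS_F2FS_IPU_DISABLE(sbi))
		return false;
	return IS_F2FS_IPU_FSYNC(sbi);
}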
730
731static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
732 int type)
733{
734 struct curseg_info *curseg = CURSEG_I(sbi, type);
735 return curseg->segno;
736}
737
738static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
739 int type)
740{
741 struct curseg_info *curseg = CURSEG_I(sbi, type);
742 return curseg->alloc_type;
743}
744
745static inline bool valid_main_segno(struct f2fs_sb_info *sbi,
746 unsigned int segno)
747{
748 return segno <= (MAIN_SEGS(sbi) - 1);
749}
750
751static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
752{
753 struct f2fs_sb_info *sbi = fio->sbi;
754
755 if (__is_valid_data_blkaddr(fio->old_blkaddr))
756 verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
757 META_GENERIC : DATA_GENERIC);
758 verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
759 META_GENERIC : DATA_GENERIC_ENHANCE);
760}
761
762/*
763 * Summary block is always treated as an invalid block
764 */
765static inline int check_block_count(struct f2fs_sb_info *sbi,
766 int segno, struct f2fs_sit_entry *raw_sit)
767{
768 bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
769 int valid_blocks = 0;
770 int cur_pos = 0, next_pos;
771 unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno);
772
773 /* check bitmap with valid block count */
774 do {
775 if (is_valid) {
776 next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
777 usable_blks_per_seg,
778 cur_pos);
779 valid_blocks += next_pos - cur_pos;
780 } else
781 next_pos = find_next_bit_le(&raw_sit->valid_map,
782 usable_blks_per_seg,
783 cur_pos);
784 cur_pos = next_pos;
785 is_valid = !is_valid;
786 } while (cur_pos < usable_blks_per_seg);
787
788 if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
789 f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
790 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
791 set_sbi_flag(sbi, SBI_NEED_FSCK);
792 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
793 return -EFSCORRUPTED;
794 }
795
796 if (usable_blks_per_seg < sbi->blocks_per_seg)
797 f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
798 sbi->blocks_per_seg,
799 usable_blks_per_seg) != sbi->blocks_per_seg);
800
801 /* check segment usage, and check boundary of a given segment number */
802 if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg
803 || !valid_main_segno(sbi, segno))) {
804 f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
805 GET_SIT_VBLOCKS(raw_sit), segno);
806 set_sbi_flag(sbi, SBI_NEED_FSCK);
807 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT);
808 return -EFSCORRUPTED;
809 }
810 return 0;
811}
812
813static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
814 unsigned int start)
815{
816 struct sit_info *sit_i = SIT_I(sbi);
817 unsigned int offset = SIT_BLOCK_OFFSET(start);
818 block_t blk_addr = sit_i->sit_base_addr + offset;
819
820 f2fs_bug_on(sbi, !valid_main_segno(sbi, start));
821
822#ifdef CONFIG_F2FS_CHECK_FS
823 if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
824 f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
825 f2fs_bug_on(sbi, 1);
826#endif
827
828 /* calculate sit block address */
829 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
830 blk_addr += sit_i->sit_blocks;
831
832 return blk_addr;
833}
834
835static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
836 pgoff_t block_addr)
837{
838 struct sit_info *sit_i = SIT_I(sbi);
839 block_addr -= sit_i->sit_base_addr;
840 if (block_addr < sit_i->sit_blocks)
841 block_addr += sit_i->sit_blocks;
842 else
843 block_addr -= sit_i->sit_blocks;
844
845 return block_addr + sit_i->sit_base_addr;
846}
847
848static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
849{
850 unsigned int block_off = SIT_BLOCK_OFFSET(start);
851
852 f2fs_change_bit(block_off, sit_i->sit_bitmap);
853#ifdef CONFIG_F2FS_CHECK_FS
854 f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
855#endif
856}
857
858static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
859 bool base_time)
860{
861 struct sit_info *sit_i = SIT_I(sbi);
862 time64_t diff, now = ktime_get_boottime_seconds();
863
864 if (now >= sit_i->mounted_time)
865 return sit_i->elapsed_time + now - sit_i->mounted_time;
866
867 /* system time is set to the past */
868 if (!base_time) {
869 diff = sit_i->mounted_time - now;
870 if (sit_i->elapsed_time >= diff)
871 return sit_i->elapsed_time - diff;
872 return 0;
873 }
874 return sit_i->elapsed_time;
875}
876
877static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
878 unsigned int ofs_in_node, unsigned char version)
879{
880 sum->nid = cpu_to_le32(nid);
881 sum->ofs_in_node = cpu_to_le16(ofs_in_node);
882 sum->version = version;
883}
884
885static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
886{
887 return __start_cp_addr(sbi) +
888 le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
889}
890
891static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
892{
893 return __start_cp_addr(sbi) +
894 le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
895 - (base + 1) + type;
896}
897
898static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
899{
900 if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
901 return true;
902 return false;
903}
904
905/*
 906 * It is very important to gather dirty pages and write them at once, so that
 907 * we can submit a big bio without interfering with other data writes.
908 * By default, 512 pages for directory data,
909 * 512 pages (2MB) * 8 for nodes, and
910 * 256 pages * 8 for meta are set.
911 */
912static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
913{
914 if (sbi->sb->s_bdi->wb.dirty_exceeded)
915 return 0;
916
917 if (type == DATA)
918 return sbi->blocks_per_seg;
919 else if (type == NODE)
920 return 8 * sbi->blocks_per_seg;
921 else if (type == META)
922 return 8 * BIO_MAX_VECS;
923 else
924 return 0;
925}
926
927/*
 928 * When writing pages, it is better to align nr_to_write to the segment size.
929 */
930static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
931 struct writeback_control *wbc)
932{
933 long nr_to_write, desired;
934
935 if (wbc->sync_mode != WB_SYNC_NONE)
936 return 0;
937
938 nr_to_write = wbc->nr_to_write;
939 desired = BIO_MAX_VECS;
940 if (type == NODE)
941 desired <<= 1;
942
943 wbc->nr_to_write = desired;
944 return desired - nr_to_write;
945}
946
947static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
948{
949 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
950 bool wakeup = false;
951 int i;
952
953 if (force)
954 goto wake_up;
955
956 mutex_lock(&dcc->cmd_lock);
957 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
958 if (i + 1 < dcc->discard_granularity)
959 break;
960 if (!list_empty(&dcc->pend_list[i])) {
961 wakeup = true;
962 break;
963 }
964 }
965 mutex_unlock(&dcc->cmd_lock);
966 if (!wakeup || !is_idle(sbi, DISCARD_TIME))
967 return;
968wake_up:
969 dcc->discard_wake = true;
970 wake_up_interruptible_all(&dcc->discard_wait_queue);
971}