v4.6
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define	START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define	NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)

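/*
 * Worked example (illustrative, assuming 4KB blocks, where
 * NAT_ENTRY_PER_BLOCK = 4096 / sizeof(struct f2fs_nat_entry) = 455):
 *
 *	START_NID(1000)       = (1000 / 455) * 455 = 910
 *	NAT_BLOCK_OFFSET(910) = 910 / 455          = 2
 *
 * i.e. nid 1000 is described by the third NAT block, whose first
 * entry covers nid 910.
 */
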
/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES 4

#define DEF_RA_NID_PAGES	4	/* # of nid pages to be read ahead */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	10

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD		10

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		(nat->ni.nid)
#define nat_set_nid(nat, n)		(nat->ni.nid = n)
#define nat_get_blkaddr(nat)		(nat->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		(nat->ni.blk_addr = b)
#define nat_get_ino(nat)		(nat->ni.ino)
#define nat_set_ino(nat, i)		(nat->ni.ino = i)
#define nat_get_version(nat)		(nat->ni.version)
#define nat_set_version(nat, v)		(nat->ni.version = v)

#define inc_node_version(version)	(++version)

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;
	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;
	return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}
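
/*
 * Usage sketch for the flag helpers above (illustrative): per-entry
 * state is reset after each checkpoint, and individual bits are
 * flipped as fsync/writeback events arrive, e.g.:
 *
 *	nat_reset_flag(ne);
 *	set_nat_flag(ne, IS_DIRTY, true);
 *	if (get_nat_flag(ne, IS_CHECKPOINTED))
 *		...
 */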

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}
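
/*
 * Round-trip sketch (illustrative helper, not part of the f2fs API):
 * the two converters above are inverses over the serialized fields;
 * the in-memory flag byte is deliberately never written to disk.
 */
static inline bool nat_round_trip_ok(struct node_info *ni)
{
	struct f2fs_nat_entry raw;
	struct node_info back = { .nid = ni->nid };

	raw_nat_from_node_info(&raw, ni);
	node_info_from_raw_nat(&back, &raw);
	return back.ino == ni->ino && back.blk_addr == ni->blk_addr &&
					back.version == ni->version;
}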

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}
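
/*
 * Worked example: with max_nid = 2,000,000 and the default
 * dirty_nats_ratio of 10 (DEF_DIRTY_NAT_RATIO_THRESHOLD), the check
 * above fires once 2,000,000 * 10 / 100 = 200,000 NAT entries are
 * dirty; callers use it to decide when dirty entries should be
 * flushed back to the NAT area.
 */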
enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

/*
 * For free nid management
 */
enum nid_state {
	NID_NEW,	/* newly added to free nid list */
	NID_ALLOC	/* it is allocated */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->free_nid_list_lock);
	if (nm_i->fcnt <= 0) {
		spin_unlock(&nm_i->free_nid_list_lock);
		return;
	}
	fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->free_nid_list_lock);
}
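
/*
 * Usage sketch (caller shape is illustrative): next_free_nid() only
 * peeks at the head of the free list, leaving *nid untouched when the
 * list is empty, so a caller must still take ownership through the
 * real allocator (alloc_nid() in fs/f2fs/node.c):
 *
 *	nid_t nid = 0;
 *
 *	next_free_nid(sbi, &nid);
 *	if (!nid)
 *		... no cached free nid, fall back ...
 */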

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(seg_off << sbi->log_blocks_per_seg << 1) +
		(block_off & (sbi->blocks_per_seg - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	if ((block_addr >> sbi->log_blocks_per_seg) % 2)
		block_addr -= sbi->blocks_per_seg;
	else
		block_addr += sbi->blocks_per_seg;

	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
}
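
/*
 * Design note (illustrative summary of the helpers above): the NAT area
 * keeps two physical copies of every NAT block in paired segments, and
 * one bit per block in nat_bitmap selects the live copy.
 * current_nat_addr() returns the live copy, next_nat_addr() returns its
 * sibling (the target of the next checkpoint's write), and
 * set_to_next_nat() flips the bit once the sibling becomes valid.
 *
 * Worked example, assuming nat_blkaddr = 512 and 512 blocks per segment
 * (log_blocks_per_seg = 9): NAT block 0 lives at block 512 or 1024
 * depending on bit 0 of nat_bitmap, and next_nat_addr() maps
 * 512 <-> 1024.
 */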
static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should keep old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);

	rn->footer.cp_ver = ckpt->checkpoint_ver;
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline unsigned long long cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 *  Inode block (0)
 *    |- direct node (1)
 *    |- direct node (2)
 *    |- indirect node (3)
 *    |            `- direct node (4 => 4 + N - 1)
 *    |- indirect node (4 + N)
 *    |            `- direct node (5 + N => 5 + 2N - 1)
 *    `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return false;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}
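
/*
 * Worked example for the offset scheme above, assuming 4KB blocks so
 * that N = NIDS_PER_BLOCK = 1018: the non-direct offsets are 3,
 * 4 + N = 1022 and 5 + 2N = 2041 (the two indirect nodes and the double
 * indirect node), plus (6 + 2N) + x(N + 1) = 2042, 3061, 4080, ... for
 * the indirect nodes hanging off the double indirect one. IS_DNODE()
 * returns false exactly for these offsets (and for xattr blocks).
 */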

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
	return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
	ClearPageChecked(page);
}

static inline void set_cold_node(struct inode *inode, struct page *page)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (S_ISDIR(inode->i_mode))
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
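
/*
 * Usage sketch (illustrative): during fsync, the last direct node
 * written for an inode is tagged so that roll-forward recovery can
 * find it, and a dnode carrying dentry updates is tagged as well:
 *
 *	set_fsync_mark(page, 1);
 *	set_dentry_mark(page, 1);
 *
 * is_fsync_dnode() / is_dent_dnode() read the same footer bits back at
 * recovery time.
 */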
v4.10.11
/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define	START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define	NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

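/*
 * Worked example (illustrative, assuming 4KB blocks, where
 * NAT_ENTRY_PER_BLOCK = 455): MAX_FREE_NIDS = 455 * 8 = 3640, i.e. the
 * free nid list is built from at most eight NAT blocks' worth of
 * candidates at a time.
 */
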
#define DEF_RA_NID_PAGES	0	/* # of nid pages to be read ahead */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD		10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD			100000

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		(nat->ni.nid)
#define nat_set_nid(nat, n)		(nat->ni.nid = n)
#define nat_get_blkaddr(nat)		(nat->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		(nat->ni.blk_addr = b)
#define nat_get_ino(nat)		(nat->ni.ino)
#define nat_set_ino(nat, i)		(nat->ni.ino = i)
#define nat_get_version(nat)		(nat->ni.version)
#define nat_set_version(nat, v)		(nat->ni.version = v)

#define inc_node_version(version)	(++version)

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;
	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;
	return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

/*
 * For free nid management
 */
enum nid_state {
	NID_NEW,	/* newly added to free nid list */
	NID_ALLOC	/* it is allocated */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next,
						struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(seg_off << sbi->log_blocks_per_seg << 1) +
		(block_off & (sbi->blocks_per_seg - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	if ((block_addr >> sbi->log_blocks_per_seg) % 2)
		block_addr -= sbi->blocks_per_seg;
	else
		block_addr += sbi->blocks_per_seg;

	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should keep old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);
	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);
	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
	__u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
		__u64 crc = le32_to_cpu(*((__le32 *)
				((unsigned char *)ckpt + crc_offset)));
		cp_ver |= (crc << 32);
	}
	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
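
/*
 * Sketch of the cp_ver packing above (illustrative values, assuming
 * CP_CRC_RECOVERY_FLAG is set): the low 32 bits of footer.cp_ver carry
 * the checkpoint version and the high 32 bits carry the checkpoint CRC,
 * so recovery can match a node against both with the single 64-bit
 * compare in is_recoverable_dnode() below:
 *
 *	checkpoint_ver = 42         -> cp_ver = 0x0000002a
 *	crc            = 0xdeadbeef -> cp_ver = 0xdeadbeef0000002a
 */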

static inline bool is_recoverable_dnode(struct page *page)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
		__u64 crc = le32_to_cpu(*((__le32 *)
				((unsigned char *)ckpt + crc_offset)));
		cp_ver |= (crc << 32);
	}
	return cp_ver == cpver_of_node(page);
}

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 *  Inode block (0)
 *    |- direct node (1)
 *    |- direct node (2)
 *    |- indirect node (3)
 *    |            `- direct node (4 => 4 + N - 1)
 *    |- indirect node (4 + N)
 *    |            `- direct node (5 + N => 5 + 2N - 1)
 *    `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return false;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
	return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
	ClearPageChecked(page);
}

static inline void set_cold_node(struct inode *inode, struct page *page)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (S_ISDIR(inode->i_mode))
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)