v3.15
   1/*
   2 * fs/f2fs/f2fs.h
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#ifndef _LINUX_F2FS_H
  12#define _LINUX_F2FS_H
  13
  14#include <linux/types.h>
  15#include <linux/page-flags.h>
  16#include <linux/buffer_head.h>
  17#include <linux/slab.h>
  18#include <linux/crc32.h>
  19#include <linux/magic.h>
  20#include <linux/kobject.h>
  21#include <linux/sched.h>
  22
  23#ifdef CONFIG_F2FS_CHECK_FS
  24#define f2fs_bug_on(condition)	BUG_ON(condition)
  25#define f2fs_down_write(x, y)	down_write_nest_lock(x, y)
  26#else
  27#define f2fs_bug_on(condition)
  28#define f2fs_down_write(x, y)	down_write(x)
  29#endif
  30
  31/*
  32 * For mount options
  33 */
  34#define F2FS_MOUNT_BG_GC		0x00000001
  35#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
  36#define F2FS_MOUNT_DISCARD		0x00000004
  37#define F2FS_MOUNT_NOHEAP		0x00000008
  38#define F2FS_MOUNT_XATTR_USER		0x00000010
  39#define F2FS_MOUNT_POSIX_ACL		0x00000020
  40#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
  41#define F2FS_MOUNT_INLINE_XATTR		0x00000080
  42#define F2FS_MOUNT_INLINE_DATA		0x00000100
  43#define F2FS_MOUNT_FLUSH_MERGE		0x00000200
  44
  45#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
  46#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
  47#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)
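/*
 * Editor's note (not part of the original header): a minimal usage sketch of
 * the mount-option helpers above. They are plain bit operations on
 * sbi->mount_opt.opt, so for example:
 *
 *	set_opt(sbi, DISCARD);		expands to  sbi->mount_opt.opt |= F2FS_MOUNT_DISCARD
 *	test_opt(sbi, DISCARD)		expands to  sbi->mount_opt.opt & F2FS_MOUNT_DISCARD
 *	clear_opt(sbi, DISCARD);	expands to  sbi->mount_opt.opt &= ~F2FS_MOUNT_DISCARD
 */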
  48
  49#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
  50		typecheck(unsigned long long, b) &&			\
  51		((long long)((a) - (b)) > 0))
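/*
 * Editor's note (not part of the original header): ver_after() compares
 * checkpoint versions with wrap-around safety by casting the unsigned
 * difference to a signed value, e.g.:
 *
 *	ver_after(5ULL, 3ULL)		-> (long long)2 > 0, so true
 *	ver_after(1ULL, ULLONG_MAX)	-> 1 - ULLONG_MAX wraps to 2, so 1 is
 *					   still considered "after" ULLONG_MAX
 */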
  52
  53typedef u32 block_t;	/*
  54			 * should not change u32, since it is the on-disk block
  55			 * address format, __le32.
  56			 */
  57typedef u32 nid_t;
  58
  59struct f2fs_mount_info {
  60	unsigned int	opt;
  61};
  62
  63#define CRCPOLY_LE 0xedb88320
  64
  65static inline __u32 f2fs_crc32(void *buf, size_t len)
  66{
  67	unsigned char *p = (unsigned char *)buf;
  68	__u32 crc = F2FS_SUPER_MAGIC;
  69	int i;
  70
  71	while (len--) {
  72		crc ^= *p++;
  73		for (i = 0; i < 8; i++)
  74			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
  75	}
  76	return crc;
  77}
  78
  79static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
  80{
  81	return f2fs_crc32(buf, buf_size) == blk_crc;
  82}
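/*
 * Editor's sketch (not part of the original header): a caller that keeps a
 * CRC next to its payload would typically pair these helpers as follows.
 * The names "payload", "stored_crc" and crc_example() are illustrative only.
 */
static inline void crc_example(void *payload, size_t len, __u32 stored_crc)
{
	__u32 crc = f2fs_crc32(payload, len);		/* compute over the buffer */

	if (!f2fs_crc_valid(stored_crc, payload, len))	/* same as crc != stored_crc */
		pr_err("f2fs: checksum mismatch (0x%x vs 0x%x)\n", stored_crc, crc);
}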
  83
  84/*
  85 * For checkpoint manager
  86 */
  87enum {
  88	NAT_BITMAP,
  89	SIT_BITMAP
  90};
  91
  92/*
  93 * For CP/NAT/SIT/SSA readahead
  94 */
  95enum {
  96	META_CP,
  97	META_NAT,
  98	META_SIT,
  99	META_SSA
 100};
 101
 102/* for the list of orphan inodes */
 103struct orphan_inode_entry {
 104	struct list_head list;	/* list head */
 105	nid_t ino;		/* inode number */
 106};
 107
 108/* for the list of directory inodes */
 109struct dir_inode_entry {
 110	struct list_head list;	/* list head */
 111	struct inode *inode;	/* vfs inode pointer */
 112};
 113
 114/* for the list of blockaddresses to be discarded */
 115struct discard_entry {
 116	struct list_head list;	/* list head */
 117	block_t blkaddr;	/* block address to be discarded */
 118	int len;		/* # of consecutive blocks of the discard */
 119};
 120
 121/* for the list of fsync inodes, used only during recovery */
 122struct fsync_inode_entry {
 123	struct list_head list;	/* list head */
 124	struct inode *inode;	/* vfs inode pointer */
 125	block_t blkaddr;	/* block address locating the last inode */
 126};
 127
 128#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
 129#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))
 130
 131#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
 132#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
 133#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
 134#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)
 135
 136static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
 137{
 138	int before = nats_in_cursum(rs);
 139	rs->n_nats = cpu_to_le16(before + i);
 140	return before;
 141}
 142
 143static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
 144{
 145	int before = sits_in_cursum(rs);
 146	rs->n_sits = cpu_to_le16(before + i);
 147	return before;
 148}
 149
 150/*
 151 * ioctl commands
 152 */
 153#define F2FS_IOC_GETFLAGS               FS_IOC_GETFLAGS
 154#define F2FS_IOC_SETFLAGS               FS_IOC_SETFLAGS
 155
 156#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 157/*
 158 * ioctl commands in 32 bit emulation
 159 */
 160#define F2FS_IOC32_GETFLAGS             FS_IOC32_GETFLAGS
 161#define F2FS_IOC32_SETFLAGS             FS_IOC32_SETFLAGS
 162#endif
 163
 164/*
 165 * For INODE and NODE manager
 166 */
 167/*
  168 * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
  169 * as its node offset to distinguish it from index node blocks.
 170 * But some bits are used to mark the node block.
 171 */
 172#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
 173				>> OFFSET_BIT_SHIFT)
 174enum {
 175	ALLOC_NODE,			/* allocate a new node page if needed */
 176	LOOKUP_NODE,			/* look up a node without readahead */
 177	LOOKUP_NODE_RA,			/*
 178					 * look up a node with readahead called
 179					 * by get_data_block.
 180					 */
 181};
 182
 183#define F2FS_LINK_MAX		32000	/* maximum link count per file */
 184
 185/* for in-memory extent cache entry */
 186#define F2FS_MIN_EXTENT_LEN	16	/* minimum extent length */
 187
 188struct extent_info {
 189	rwlock_t ext_lock;	/* rwlock for consistency */
 190	unsigned int fofs;	/* start offset in a file */
 191	u32 blk_addr;		/* start block address of the extent */
 192	unsigned int len;	/* length of the extent */
 193};
 194
 195/*
 196 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 197 */
 198#define FADVISE_COLD_BIT	0x01
 199#define FADVISE_LOST_PINO_BIT	0x02
 200
 201#define DEF_DIR_LEVEL		0
 202
 203struct f2fs_inode_info {
 204	struct inode vfs_inode;		/* serve a vfs inode */
 205	unsigned long i_flags;		/* keep an inode flags for ioctl */
  206	unsigned char i_advise;		/* used to give file attribute hints */
  207	unsigned char i_dir_level;	/* used for dentry level for large dir */
  208	unsigned int i_current_depth;	/* used only in directory structure */
 209	unsigned int i_pino;		/* parent inode number */
 210	umode_t i_acl_mode;		/* keep file acl mode temporarily */
 211
  212	/* Used below internally in f2fs */
  213	unsigned long flags;		/* used to pass per-file flags */
 214	struct rw_semaphore i_sem;	/* protect fi info */
 215	atomic_t dirty_dents;		/* # of dirty dentry pages */
 216	f2fs_hash_t chash;		/* hash value of given file name */
 217	unsigned int clevel;		/* maximum level of given file name */
 218	nid_t i_xattr_nid;		/* node id that contains xattrs */
 219	unsigned long long xattr_ver;	/* cp version of xattr modification */
 220	struct extent_info ext;		/* in-memory extent cache entry */
 221};
 222
 223static inline void get_extent_info(struct extent_info *ext,
 224					struct f2fs_extent i_ext)
 225{
 226	write_lock(&ext->ext_lock);
 227	ext->fofs = le32_to_cpu(i_ext.fofs);
 228	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
 229	ext->len = le32_to_cpu(i_ext.len);
 230	write_unlock(&ext->ext_lock);
 231}
 232
 233static inline void set_raw_extent(struct extent_info *ext,
 234					struct f2fs_extent *i_ext)
 235{
 236	read_lock(&ext->ext_lock);
 237	i_ext->fofs = cpu_to_le32(ext->fofs);
 238	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
 239	i_ext->len = cpu_to_le32(ext->len);
 240	read_unlock(&ext->ext_lock);
 241}
 242
 243struct f2fs_nm_info {
 244	block_t nat_blkaddr;		/* base disk address of NAT */
 245	nid_t max_nid;			/* maximum possible node ids */
 246	nid_t next_scan_nid;		/* the next nid to be scanned */
 247	unsigned int ram_thresh;	/* control the memory footprint */
 248
 249	/* NAT cache management */
 250	struct radix_tree_root nat_root;/* root of the nat entry cache */
  251	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
 252	unsigned int nat_cnt;		/* the # of cached nat entries */
 253	struct list_head nat_entries;	/* cached nat entry list (clean) */
 254	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */
 255
 256	/* free node ids management */
 257	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
 258	struct list_head free_nid_list;	/* a list for free nids */
 259	spinlock_t free_nid_list_lock;	/* protect free nid list */
 260	unsigned int fcnt;		/* the number of free node id */
 261	struct mutex build_lock;	/* lock for build free nids */
 262
 263	/* for checkpoint */
 264	char *nat_bitmap;		/* NAT bitmap pointer */
 265	int bitmap_size;		/* bitmap size */
 266};
 267
 268/*
 269 * this structure is used as one of function parameters.
 270 * all the information are dedicated to a given direct node block determined
 271 * by the data offset in a file.
 272 */
 273struct dnode_of_data {
 274	struct inode *inode;		/* vfs inode pointer */
 275	struct page *inode_page;	/* its inode page, NULL is possible */
 276	struct page *node_page;		/* cached direct node page */
 277	nid_t nid;			/* node id of the direct node block */
 278	unsigned int ofs_in_node;	/* data offset in the node page */
 279	bool inode_page_locked;		/* inode page is locked or not */
  280	block_t	data_blkaddr;		/* block address of the data block */
 281};
 282
 283static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
 284		struct page *ipage, struct page *npage, nid_t nid)
 285{
 286	memset(dn, 0, sizeof(*dn));
 287	dn->inode = inode;
 288	dn->inode_page = ipage;
 289	dn->node_page = npage;
 290	dn->nid = nid;
 291}
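/*
 * Editor's sketch (not part of the original header): the usual life cycle of
 * a dnode_of_data, using helpers declared or defined further down this file
 * (get_dnode_of_data(), datablock_addr(), f2fs_put_dnode()). The function
 * name dnode_lookup_example() is illustrative only and error handling is
 * trimmed for brevity.
 */
static inline block_t dnode_lookup_example(struct inode *inode, pgoff_t index)
{
	struct dnode_of_data dn;
	block_t blkaddr = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);	/* no pages cached yet */
	if (!get_dnode_of_data(&dn, index, LOOKUP_NODE)) {
		blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		f2fs_put_dnode(&dn);			/* release the pinned pages */
	}
	return blkaddr;
}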
 292
 293/*
 294 * For SIT manager
 295 *
 296 * By default, there are 6 active log areas across the whole main area.
 297 * When considering hot and cold data separation to reduce cleaning overhead,
 298 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 299 * respectively.
 300 * In the current design, you should not change the numbers intentionally.
  301 * Instead, you can select 2, 4, or 6 logs with the active_logs=x mount
  302 * option, according to the underlying devices. (default: 6)
 303 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 304 * data and 8 for node logs.
 305 */
 306#define	NR_CURSEG_DATA_TYPE	(3)
 307#define NR_CURSEG_NODE_TYPE	(3)
 308#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
 309
 310enum {
 311	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
 312	CURSEG_WARM_DATA,	/* data blocks */
 313	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
 314	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
 315	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
 316	CURSEG_COLD_NODE,	/* indirect node blocks */
 317	NO_CHECK_TYPE
 318};
 319
 320struct flush_cmd {
 321	struct flush_cmd *next;
 322	struct completion wait;
 323	int ret;
 324};
 325
 326struct f2fs_sm_info {
 327	struct sit_info *sit_info;		/* whole segment information */
 328	struct free_segmap_info *free_info;	/* free segment information */
 329	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
 330	struct curseg_info *curseg_array;	/* active segment information */
 331
 332	struct list_head wblist_head;	/* list of under-writeback pages */
 333	spinlock_t wblist_lock;		/* lock for checkpoint */
 334
 335	block_t seg0_blkaddr;		/* block address of 0'th segment */
 336	block_t main_blkaddr;		/* start block address of main area */
 337	block_t ssa_blkaddr;		/* start block address of SSA area */
 338
 339	unsigned int segment_count;	/* total # of segments */
 340	unsigned int main_segments;	/* # of segments in main area */
 341	unsigned int reserved_segments;	/* # of reserved segments */
 342	unsigned int ovp_segments;	/* # of overprovision segments */
 343
 344	/* a threshold to reclaim prefree segments */
 345	unsigned int rec_prefree_segments;
 346
 347	/* for small discard management */
 348	struct list_head discard_list;		/* 4KB discard list */
 349	int nr_discards;			/* # of discards in the list */
 350	int max_discards;			/* max. discards to be issued */
 351
 352	unsigned int ipu_policy;	/* in-place-update policy */
 353	unsigned int min_ipu_util;	/* in-place-update threshold */
 354
 355	/* for flush command control */
 356	struct task_struct *f2fs_issue_flush;	/* flush thread */
 357	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
 358	struct flush_cmd *issue_list;		/* list for command issue */
 359	struct flush_cmd *dispatch_list;	/* list for command dispatch */
 360	spinlock_t issue_lock;			/* for issue list lock */
 361	struct flush_cmd *issue_tail;		/* list tail of issue list */
 362};
 363
 364/*
 365 * For superblock
 366 */
 367/*
 368 * COUNT_TYPE for monitoring
 369 *
  370 * f2fs monitors the number of several block types, such as pages under
  371 * writeback, dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 372 */
 373enum count_type {
 374	F2FS_WRITEBACK,
 375	F2FS_DIRTY_DENTS,
 376	F2FS_DIRTY_NODES,
 377	F2FS_DIRTY_META,
 378	NR_COUNT_TYPE,
 379};
 380
 381/*
  382 * The below are the page types of bios used in submit_bio().
 383 * The available types are:
 384 * DATA			User data pages. It operates as async mode.
 385 * NODE			Node pages. It operates as async mode.
 386 * META			FS metadata pages such as SIT, NAT, CP.
 387 * NR_PAGE_TYPE		The number of page types.
 388 * META_FLUSH		Make sure the previous pages are written
  389 *			while waiting for the bio's completion.
  390 * ...			It can only be used with META.
 391 */
 392#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
 393enum page_type {
 394	DATA,
 395	NODE,
 396	META,
 397	NR_PAGE_TYPE,
 398	META_FLUSH,
 399};
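/*
 * Editor's note (not part of the original header): META_FLUSH comes after
 * NR_PAGE_TYPE, so it cannot be used directly as an index into per-type
 * arrays such as write_io[NR_PAGE_TYPE]; PAGE_TYPE_OF_BIO() folds it (and
 * anything above META) back onto META, e.g.:
 *
 *	PAGE_TYPE_OF_BIO(DATA)		-> DATA
 *	PAGE_TYPE_OF_BIO(META_FLUSH)	-> META	(because META_FLUSH > META)
 */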
 400
 401struct f2fs_io_info {
 402	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
 403	int rw;			/* contains R/RS/W/WS with REQ_META/REQ_PRIO */
 404};
 405
 406#define is_read_io(rw)	(((rw) & 1) == READ)
 407struct f2fs_bio_info {
 408	struct f2fs_sb_info *sbi;	/* f2fs superblock */
 409	struct bio *bio;		/* bios to merge */
 410	sector_t last_block_in_bio;	/* last block number */
 411	struct f2fs_io_info fio;	/* store buffered io info. */
 412	struct rw_semaphore io_rwsem;	/* blocking op for bio */
 413};
 414
 415struct f2fs_sb_info {
 416	struct super_block *sb;			/* pointer to VFS super block */
 417	struct proc_dir_entry *s_proc;		/* proc entry */
 418	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
 419	struct f2fs_super_block *raw_super;	/* raw super block pointer */
 420	int s_dirty;				/* dirty flag for checkpoint */
 421
 422	/* for node-related operations */
 423	struct f2fs_nm_info *nm_info;		/* node manager */
 424	struct inode *node_inode;		/* cache node blocks */
 425
 426	/* for segment-related operations */
 427	struct f2fs_sm_info *sm_info;		/* segment manager */
 428
 429	/* for bio operations */
 430	struct f2fs_bio_info read_io;			/* for read bios */
 431	struct f2fs_bio_info write_io[NR_PAGE_TYPE];	/* for write bios */
 432	struct completion *wait_io;		/* for completion bios */
 433
 434	/* for checkpoint */
 435	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
 436	struct inode *meta_inode;		/* cache meta blocks */
 437	struct mutex cp_mutex;			/* checkpoint procedure lock */
 438	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
 439	struct mutex node_write;		/* locking node writes */
 440	struct mutex writepages;		/* mutex for writepages() */
  441	bool por_doing;				/* whether recovery is in progress */
 442	wait_queue_head_t cp_wait;
 443
 444	/* for orphan inode management */
 445	struct list_head orphan_inode_list;	/* orphan inode list */
 446	spinlock_t orphan_inode_lock;		/* for orphan inode list */
 447	unsigned int n_orphans;			/* # of orphan inodes */
 448	unsigned int max_orphans;		/* max orphan inodes */
 449
 450	/* for directory inode management */
 451	struct list_head dir_inode_list;	/* dir inode list */
 452	spinlock_t dir_inode_lock;		/* for dir inode list lock */
 453
 454	/* basic file system units */
 455	unsigned int log_sectors_per_block;	/* log2 sectors per block */
 456	unsigned int log_blocksize;		/* log2 block size */
 457	unsigned int blocksize;			/* block size */
 458	unsigned int root_ino_num;		/* root inode number*/
 459	unsigned int node_ino_num;		/* node inode number*/
 460	unsigned int meta_ino_num;		/* meta inode number*/
 461	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
 462	unsigned int blocks_per_seg;		/* blocks per segment */
 463	unsigned int segs_per_sec;		/* segments per section */
 464	unsigned int secs_per_zone;		/* sections per zone */
 465	unsigned int total_sections;		/* total section count */
 466	unsigned int total_node_count;		/* total node block count */
 467	unsigned int total_valid_node_count;	/* valid node block count */
 468	unsigned int total_valid_inode_count;	/* valid inode count */
 469	int active_logs;			/* # of active logs */
 470	int dir_level;				/* directory level */
 471
 472	block_t user_block_count;		/* # of user blocks */
 473	block_t total_valid_block_count;	/* # of valid blocks */
 474	block_t alloc_valid_block_count;	/* # of allocated blocks */
 475	block_t last_valid_block_count;		/* for recovery */
 476	u32 s_next_generation;			/* for NFS support */
 477	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */
 478
 479	struct f2fs_mount_info mount_opt;	/* mount options */
 480
 481	/* for cleaning operations */
 482	struct mutex gc_mutex;			/* mutex for GC */
 483	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 484	unsigned int cur_victim_sec;		/* current victim section num */
 485
 486	/* maximum # of trials to find a victim segment for SSR and GC */
 487	unsigned int max_victim_search;
 488
 489	/*
 490	 * for stat information.
 491	 * one is for the LFS mode, and the other is for the SSR mode.
 492	 */
 493#ifdef CONFIG_F2FS_STAT_FS
 494	struct f2fs_stat_info *stat_info;	/* FS status information */
 495	unsigned int segment_count[2];		/* # of allocated segments */
 496	unsigned int block_count[2];		/* # of allocated blocks */
 497	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
 498	int inline_inode;			/* # of inline_data inodes */
 499	int bg_gc;				/* background gc calls */
 500	unsigned int n_dirty_dirs;		/* # of dir inodes */
 501#endif
 502	unsigned int last_victim[2];		/* last victim segment # */
 503	spinlock_t stat_lock;			/* lock for stat operations */
 504
  505	/* For sysfs support */
 506	struct kobject s_kobj;
 507	struct completion s_kobj_unregister;
 508};
 509
 510/*
 511 * Inline functions
 512 */
 513static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
 514{
 515	return container_of(inode, struct f2fs_inode_info, vfs_inode);
 516}
 517
 518static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
 519{
 520	return sb->s_fs_info;
 521}
 522
 523static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
 524{
 525	return (struct f2fs_super_block *)(sbi->raw_super);
 526}
 527
 528static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
 529{
 530	return (struct f2fs_checkpoint *)(sbi->ckpt);
 531}
 532
 533static inline struct f2fs_node *F2FS_NODE(struct page *page)
 534{
 535	return (struct f2fs_node *)page_address(page);
 536}
 537
 538static inline struct f2fs_inode *F2FS_INODE(struct page *page)
 539{
 540	return &((struct f2fs_node *)page_address(page))->i;
 541}
 542
 543static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
 544{
 545	return (struct f2fs_nm_info *)(sbi->nm_info);
 546}
 547
 548static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
 549{
 550	return (struct f2fs_sm_info *)(sbi->sm_info);
 551}
 552
 553static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
 554{
 555	return (struct sit_info *)(SM_I(sbi)->sit_info);
 556}
 557
 558static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
 559{
 560	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
 561}
 562
 563static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
 564{
 565	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
 566}
 567
 568static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
 569{
 570	return sbi->meta_inode->i_mapping;
 571}
 572
 573static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
 574{
 575	return sbi->node_inode->i_mapping;
 576}
 577
 578static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
 579{
 580	sbi->s_dirty = 1;
 581}
 582
 583static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
 584{
 585	sbi->s_dirty = 0;
 586}
 587
 588static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
 589{
 590	return le64_to_cpu(cp->checkpoint_ver);
 591}
 592
 593static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 594{
 595	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
 596	return ckpt_flags & f;
 597}
 598
 599static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 600{
 601	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
 602	ckpt_flags |= f;
 603	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 604}
 605
 606static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 607{
 608	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
 609	ckpt_flags &= (~f);
 610	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 611}
 612
 613static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 614{
 615	down_read(&sbi->cp_rwsem);
 616}
 617
 618static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 619{
 620	up_read(&sbi->cp_rwsem);
 621}
 622
 623static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 624{
 625	f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
 626}
 627
 628static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 629{
 630	up_write(&sbi->cp_rwsem);
 631}
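/*
 * Editor's sketch (not part of the original header): cp_rwsem lets many file
 * system operations run concurrently on the read side while the checkpoint
 * path takes the write side to freeze them, roughly:
 *
 *	f2fs_lock_op(sbi);	// ordinary op: shared, many holders allowed
 *	...modify node/data state...
 *	f2fs_unlock_op(sbi);
 *
 *	f2fs_lock_all(sbi);	// checkpoint: exclusive, waits out the ops above
 *	...write the checkpoint...
 *	f2fs_unlock_all(sbi);
 */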
 632
 633/*
 634 * Check whether the given nid is within node id range.
 635 */
 636static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 637{
 638	WARN_ON((nid >= NM_I(sbi)->max_nid));
 639	if (unlikely(nid >= NM_I(sbi)->max_nid))
 640		return -EINVAL;
 641	return 0;
 642}
 643
 644#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1
 645
 646/*
 647 * Check whether the inode has blocks or not
 648 */
 649static inline int F2FS_HAS_BLOCKS(struct inode *inode)
 650{
 651	if (F2FS_I(inode)->i_xattr_nid)
 652		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1;
 653	else
 654		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS;
 655}
 656
 657static inline bool f2fs_has_xattr_block(unsigned int ofs)
 658{
 659	return ofs == XATTR_NODE_OFFSET;
 660}
 661
 662static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
 663				 struct inode *inode, blkcnt_t count)
 664{
 665	block_t	valid_block_count;
 666
 667	spin_lock(&sbi->stat_lock);
 668	valid_block_count =
 669		sbi->total_valid_block_count + (block_t)count;
 670	if (unlikely(valid_block_count > sbi->user_block_count)) {
 671		spin_unlock(&sbi->stat_lock);
 672		return false;
 673	}
 674	inode->i_blocks += count;
 675	sbi->total_valid_block_count = valid_block_count;
 676	sbi->alloc_valid_block_count += (block_t)count;
 677	spin_unlock(&sbi->stat_lock);
 678	return true;
 679}
 680
 681static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 682						struct inode *inode,
 683						blkcnt_t count)
 684{
 685	spin_lock(&sbi->stat_lock);
 686	f2fs_bug_on(sbi->total_valid_block_count < (block_t) count);
 687	f2fs_bug_on(inode->i_blocks < count);
 688	inode->i_blocks -= count;
 689	sbi->total_valid_block_count -= (block_t)count;
 690	spin_unlock(&sbi->stat_lock);
 691}
 692
 693static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
 694{
 695	atomic_inc(&sbi->nr_pages[count_type]);
 696	F2FS_SET_SB_DIRT(sbi);
 697}
 698
 699static inline void inode_inc_dirty_dents(struct inode *inode)
 700{
 701	inc_page_count(F2FS_SB(inode->i_sb), F2FS_DIRTY_DENTS);
 702	atomic_inc(&F2FS_I(inode)->dirty_dents);
 703}
 704
 705static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
 706{
 707	atomic_dec(&sbi->nr_pages[count_type]);
 708}
 709
 710static inline void inode_dec_dirty_dents(struct inode *inode)
 711{
 712	if (!S_ISDIR(inode->i_mode))
 713		return;
 714
 715	dec_page_count(F2FS_SB(inode->i_sb), F2FS_DIRTY_DENTS);
 716	atomic_dec(&F2FS_I(inode)->dirty_dents);
 717}
 718
 719static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
 720{
 721	return atomic_read(&sbi->nr_pages[count_type]);
 722}
 723
 724static inline int get_dirty_dents(struct inode *inode)
 725{
 726	return atomic_read(&F2FS_I(inode)->dirty_dents);
 727}
 728
 729static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
 730{
 731	unsigned int pages_per_sec = sbi->segs_per_sec *
 732					(1 << sbi->log_blocks_per_seg);
 733	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
 734			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
 735}
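/*
 * Editor's note (not part of the original header): a worked example of
 * get_blocktype_secs(), assuming log_blocks_per_seg = 9 (512 blocks per 2MB
 * segment) and segs_per_sec = 1. A count of 100 dirty pages rounds up to one
 * full section:
 *
 *	pages_per_sec = 1 * 512 = 512
 *	(100 + 512 - 1) >> 9 = 611 >> 9 = 1 segment, then / 1 = 1 section
 */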
 736
 737static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
 738{
 739	return sbi->total_valid_block_count;
 740}
 741
 742static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
 743{
 744	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 745
 746	/* return NAT or SIT bitmap */
 747	if (flag == NAT_BITMAP)
 748		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
 749	else if (flag == SIT_BITMAP)
 750		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
 751
 752	return 0;
 753}
 754
 755static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
 756{
 757	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 758	int offset = (flag == NAT_BITMAP) ?
 759			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
 760	return &ckpt->sit_nat_version_bitmap + offset;
 761}
 762
 763static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
 764{
 765	block_t start_addr;
 766	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 767	unsigned long long ckpt_version = cur_cp_version(ckpt);
 768
 769	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
 770
 771	/*
  772	 * an odd numbered checkpoint should be at cp segment 0
  773	 * and an even numbered checkpoint must be at cp segment 1
 774	 */
 775	if (!(ckpt_version & 1))
 776		start_addr += sbi->blocks_per_seg;
 777
 778	return start_addr;
 779}
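/*
 * Editor's note (not part of the original header): a worked example of
 * __start_cp_addr(), assuming cp_blkaddr = 512 and blocks_per_seg = 512:
 *
 *	checkpoint_ver = 7 (odd)  -> start_addr = 512		(first cp pack)
 *	checkpoint_ver = 8 (even) -> start_addr = 512 + 512	(second cp pack)
 *
 * i.e. the two checkpoint packs alternate between the first two segments of
 * the CP area as the version advances.
 */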
 780
 781static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
 782{
 783	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
 784}
 785
 786static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
 787						struct inode *inode)
 788{
 789	block_t	valid_block_count;
 790	unsigned int valid_node_count;
 791
 792	spin_lock(&sbi->stat_lock);
 793
 794	valid_block_count = sbi->total_valid_block_count + 1;
 795	if (unlikely(valid_block_count > sbi->user_block_count)) {
 796		spin_unlock(&sbi->stat_lock);
 797		return false;
 798	}
 799
 800	valid_node_count = sbi->total_valid_node_count + 1;
 801	if (unlikely(valid_node_count > sbi->total_node_count)) {
 802		spin_unlock(&sbi->stat_lock);
 803		return false;
 804	}
 805
 806	if (inode)
 807		inode->i_blocks++;
 808
 809	sbi->alloc_valid_block_count++;
 810	sbi->total_valid_node_count++;
 811	sbi->total_valid_block_count++;
 812	spin_unlock(&sbi->stat_lock);
 813
 814	return true;
 815}
 816
 817static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
 818						struct inode *inode)
 819{
 820	spin_lock(&sbi->stat_lock);
 821
 822	f2fs_bug_on(!sbi->total_valid_block_count);
 823	f2fs_bug_on(!sbi->total_valid_node_count);
 824	f2fs_bug_on(!inode->i_blocks);
 825
 826	inode->i_blocks--;
 827	sbi->total_valid_node_count--;
 828	sbi->total_valid_block_count--;
 829
 830	spin_unlock(&sbi->stat_lock);
 831}
 832
 833static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
 834{
 835	return sbi->total_valid_node_count;
 836}
 837
 838static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
 839{
 840	spin_lock(&sbi->stat_lock);
 841	f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count);
 842	sbi->total_valid_inode_count++;
 843	spin_unlock(&sbi->stat_lock);
 844}
 845
 846static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
 847{
 848	spin_lock(&sbi->stat_lock);
 849	f2fs_bug_on(!sbi->total_valid_inode_count);
 850	sbi->total_valid_inode_count--;
 851	spin_unlock(&sbi->stat_lock);
 852}
 853
 854static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
 855{
 856	return sbi->total_valid_inode_count;
 857}
 858
 859static inline void f2fs_put_page(struct page *page, int unlock)
 860{
 861	if (!page)
 862		return;
 863
 864	if (unlock) {
 865		f2fs_bug_on(!PageLocked(page));
 866		unlock_page(page);
 867	}
 868	page_cache_release(page);
 869}
 870
 871static inline void f2fs_put_dnode(struct dnode_of_data *dn)
 872{
 873	if (dn->node_page)
 874		f2fs_put_page(dn->node_page, 1);
 875	if (dn->inode_page && dn->node_page != dn->inode_page)
 876		f2fs_put_page(dn->inode_page, 0);
 877	dn->node_page = NULL;
 878	dn->inode_page = NULL;
 879}
 880
 881static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
 882					size_t size)
 883{
 884	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
 885}
 886
 887static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
 888						gfp_t flags)
 889{
 890	void *entry;
 891retry:
 892	entry = kmem_cache_alloc(cachep, flags);
 893	if (!entry) {
 894		cond_resched();
 895		goto retry;
 896	}
 897
 898	return entry;
 899}
 900
 901#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
 902
 903static inline bool IS_INODE(struct page *page)
 904{
 905	struct f2fs_node *p = F2FS_NODE(page);
 906	return RAW_IS_INODE(p);
 907}
 908
 909static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
 910{
 911	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
 912}
 913
 914static inline block_t datablock_addr(struct page *node_page,
 915		unsigned int offset)
 916{
 917	struct f2fs_node *raw_node;
 918	__le32 *addr_array;
 919	raw_node = F2FS_NODE(node_page);
 920	addr_array = blkaddr_in_node(raw_node);
 921	return le32_to_cpu(addr_array[offset]);
 922}
 923
 924static inline int f2fs_test_bit(unsigned int nr, char *addr)
 925{
 926	int mask;
 927
 928	addr += (nr >> 3);
 929	mask = 1 << (7 - (nr & 0x07));
 930	return mask & *addr;
 931}
 932
 933static inline int f2fs_set_bit(unsigned int nr, char *addr)
 934{
 935	int mask;
 936	int ret;
 937
 938	addr += (nr >> 3);
 939	mask = 1 << (7 - (nr & 0x07));
 940	ret = mask & *addr;
 941	*addr |= mask;
 942	return ret;
 943}
 944
 945static inline int f2fs_clear_bit(unsigned int nr, char *addr)
 946{
 947	int mask;
 948	int ret;
 949
 950	addr += (nr >> 3);
 951	mask = 1 << (7 - (nr & 0x07));
 952	ret = mask & *addr;
 953	*addr &= ~mask;
 954	return ret;
 955}
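/*
 * Editor's note (not part of the original header): unlike the generic kernel
 * bitops, which are LSB-first within each long, these helpers number bits
 * MSB-first within each byte, e.g. for f2fs_set_bit(nr, addr):
 *
 *	nr = 0  ->  addr[0] |= 0x80	(mask = 1 << (7 - 0))
 *	nr = 9  ->  addr[1] |= 0x40	(mask = 1 << (7 - 1))
 */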
 956
 957/* used for f2fs_inode_info->flags */
 958enum {
 959	FI_NEW_INODE,		/* indicate newly allocated inode */
 960	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
 961	FI_INC_LINK,		/* need to increment i_nlink */
 962	FI_ACL_MODE,		/* indicate acl mode */
 963	FI_NO_ALLOC,		/* should not allocate any blocks */
 964	FI_UPDATE_DIR,		/* should update inode block for consistency */
 965	FI_DELAY_IPUT,		/* used for the recovery */
 966	FI_NO_EXTENT,		/* not to use the extent cache */
 967	FI_INLINE_XATTR,	/* used for inline xattr */
  968	FI_INLINE_DATA,		/* used for inline data */
 969};
 970
 971static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
 972{
 973	set_bit(flag, &fi->flags);
 974}
 975
 976static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
 977{
 978	return test_bit(flag, &fi->flags);
 979}
 980
 981static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
 982{
 983	clear_bit(flag, &fi->flags);
 984}
 985
 986static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
 987{
 988	fi->i_acl_mode = mode;
 989	set_inode_flag(fi, FI_ACL_MODE);
 990}
 991
 992static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
 993{
 994	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
 995		clear_inode_flag(fi, FI_ACL_MODE);
 996		return 1;
 997	}
 998	return 0;
 999}
1000
1001static inline void get_inline_info(struct f2fs_inode_info *fi,
1002					struct f2fs_inode *ri)
1003{
1004	if (ri->i_inline & F2FS_INLINE_XATTR)
1005		set_inode_flag(fi, FI_INLINE_XATTR);
1006	if (ri->i_inline & F2FS_INLINE_DATA)
1007		set_inode_flag(fi, FI_INLINE_DATA);
1008}
1009
1010static inline void set_raw_inline(struct f2fs_inode_info *fi,
1011					struct f2fs_inode *ri)
1012{
1013	ri->i_inline = 0;
1014
1015	if (is_inode_flag_set(fi, FI_INLINE_XATTR))
1016		ri->i_inline |= F2FS_INLINE_XATTR;
1017	if (is_inode_flag_set(fi, FI_INLINE_DATA))
1018		ri->i_inline |= F2FS_INLINE_DATA;
1019}
1020
1021static inline int f2fs_has_inline_xattr(struct inode *inode)
1022{
1023	return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
1024}
1025
1026static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
1027{
1028	if (f2fs_has_inline_xattr(&fi->vfs_inode))
1029		return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
1030	return DEF_ADDRS_PER_INODE;
1031}
1032
1033static inline void *inline_xattr_addr(struct page *page)
1034{
1035	struct f2fs_inode *ri = F2FS_INODE(page);
1036	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
1037					F2FS_INLINE_XATTR_ADDRS]);
1038}
1039
1040static inline int inline_xattr_size(struct inode *inode)
1041{
1042	if (f2fs_has_inline_xattr(inode))
1043		return F2FS_INLINE_XATTR_ADDRS << 2;
1044	else
1045		return 0;
1046}
1047
1048static inline int f2fs_has_inline_data(struct inode *inode)
1049{
1050	return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
1051}
1052
1053static inline void *inline_data_addr(struct page *page)
1054{
1055	struct f2fs_inode *ri = F2FS_INODE(page);
1056	return (void *)&(ri->i_addr[1]);
1057}
1058
1059static inline int f2fs_readonly(struct super_block *sb)
1060{
1061	return sb->s_flags & MS_RDONLY;
1062}
1063
1064static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
1065{
1066	set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
1067	sbi->sb->s_flags |= MS_RDONLY;
1068}
1069
1070#define get_inode_mode(i) \
1071	((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
1072	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
1073
1074/*
1075 * file.c
1076 */
1077int f2fs_sync_file(struct file *, loff_t, loff_t, int);
1078void truncate_data_blocks(struct dnode_of_data *);
1079int truncate_blocks(struct inode *, u64);
1080void f2fs_truncate(struct inode *);
1081int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
1082int f2fs_setattr(struct dentry *, struct iattr *);
1083int truncate_hole(struct inode *, pgoff_t, pgoff_t);
1084int truncate_data_blocks_range(struct dnode_of_data *, int);
1085long f2fs_ioctl(struct file *, unsigned int, unsigned long);
1086long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
1087
1088/*
1089 * inode.c
1090 */
1091void f2fs_set_inode_flags(struct inode *);
1092struct inode *f2fs_iget(struct super_block *, unsigned long);
1093int try_to_free_nats(struct f2fs_sb_info *, int);
1094void update_inode(struct inode *, struct page *);
1095void update_inode_page(struct inode *);
1096int f2fs_write_inode(struct inode *, struct writeback_control *);
1097void f2fs_evict_inode(struct inode *);
1098
1099/*
1100 * namei.c
1101 */
1102struct dentry *f2fs_get_parent(struct dentry *child);
1103
1104/*
1105 * dir.c
1106 */
1107struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
1108							struct page **);
1109struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
1110ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
1111void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
1112				struct page *, struct inode *);
1113int update_dent_inode(struct inode *, const struct qstr *);
1114int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
1115void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
1116int f2fs_make_empty(struct inode *, struct inode *);
1117bool f2fs_empty_dir(struct inode *);
1118
1119static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
1120{
1121	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
1122				inode);
1123}
1124
1125/*
1126 * super.c
1127 */
1128int f2fs_sync_fs(struct super_block *, int);
1129extern __printf(3, 4)
1130void f2fs_msg(struct super_block *, const char *, const char *, ...);
1131
1132/*
1133 * hash.c
1134 */
1135f2fs_hash_t f2fs_dentry_hash(const char *, size_t);
1136
1137/*
1138 * node.c
1139 */
1140struct dnode_of_data;
1141struct node_info;
1142
1143int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
1144bool fsync_mark_done(struct f2fs_sb_info *, nid_t);
1145void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
1146int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
1147int truncate_inode_blocks(struct inode *, pgoff_t);
1148int truncate_xattr_node(struct inode *, struct page *);
1149int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
1150void remove_inode_page(struct inode *);
1151struct page *new_inode_page(struct inode *, const struct qstr *);
1152struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
1153void ra_node_page(struct f2fs_sb_info *, nid_t);
1154struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
1155struct page *get_node_page_ra(struct page *, int);
1156void sync_inode_page(struct dnode_of_data *);
1157int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
1158bool alloc_nid(struct f2fs_sb_info *, nid_t *);
1159void alloc_nid_done(struct f2fs_sb_info *, nid_t);
1160void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
1161void recover_node_page(struct f2fs_sb_info *, struct page *,
1162		struct f2fs_summary *, struct node_info *, block_t);
1163bool recover_xattr_data(struct inode *, struct page *, block_t);
1164int recover_inode_page(struct f2fs_sb_info *, struct page *);
1165int restore_node_summary(struct f2fs_sb_info *, unsigned int,
1166				struct f2fs_summary_block *);
1167void flush_nat_entries(struct f2fs_sb_info *);
1168int build_node_manager(struct f2fs_sb_info *);
1169void destroy_node_manager(struct f2fs_sb_info *);
1170int __init create_node_manager_caches(void);
1171void destroy_node_manager_caches(void);
1172
1173/*
1174 * segment.c
1175 */
1176void f2fs_balance_fs(struct f2fs_sb_info *);
1177void f2fs_balance_fs_bg(struct f2fs_sb_info *);
1178int f2fs_issue_flush(struct f2fs_sb_info *);
1179void invalidate_blocks(struct f2fs_sb_info *, block_t);
1180void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
1181void clear_prefree_segments(struct f2fs_sb_info *);
1182int npages_for_summary_flush(struct f2fs_sb_info *);
1183void allocate_new_segments(struct f2fs_sb_info *);
1184struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
1185void write_meta_page(struct f2fs_sb_info *, struct page *);
1186void write_node_page(struct f2fs_sb_info *, struct page *,
1187		struct f2fs_io_info *, unsigned int, block_t, block_t *);
1188void write_data_page(struct page *, struct dnode_of_data *, block_t *,
1189					struct f2fs_io_info *);
1190void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
1191void recover_data_page(struct f2fs_sb_info *, struct page *,
1192				struct f2fs_summary *, block_t, block_t);
1193void rewrite_node_page(struct f2fs_sb_info *, struct page *,
1194				struct f2fs_summary *, block_t, block_t);
1195void allocate_data_block(struct f2fs_sb_info *, struct page *,
1196		block_t, block_t *, struct f2fs_summary *, int);
1197void f2fs_wait_on_page_writeback(struct page *, enum page_type);
1198void write_data_summaries(struct f2fs_sb_info *, block_t);
1199void write_node_summaries(struct f2fs_sb_info *, block_t);
1200int lookup_journal_in_cursum(struct f2fs_summary_block *,
1201					int, unsigned int, int);
1202void flush_sit_entries(struct f2fs_sb_info *);
1203int build_segment_manager(struct f2fs_sb_info *);
1204void destroy_segment_manager(struct f2fs_sb_info *);
1205int __init create_segment_manager_caches(void);
1206void destroy_segment_manager_caches(void);
1207
1208/*
1209 * checkpoint.c
1210 */
1211struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
1212struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
1213int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
1214long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
1215int acquire_orphan_inode(struct f2fs_sb_info *);
1216void release_orphan_inode(struct f2fs_sb_info *);
1217void add_orphan_inode(struct f2fs_sb_info *, nid_t);
1218void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
1219void recover_orphan_inodes(struct f2fs_sb_info *);
1220int get_valid_checkpoint(struct f2fs_sb_info *);
1221void set_dirty_dir_page(struct inode *, struct page *);
1222void add_dirty_dir_inode(struct inode *);
1223void remove_dirty_dir_inode(struct inode *);
1224struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
1225void sync_dirty_dir_inodes(struct f2fs_sb_info *);
1226void write_checkpoint(struct f2fs_sb_info *, bool);
1227void init_orphan_info(struct f2fs_sb_info *);
1228int __init create_checkpoint_caches(void);
1229void destroy_checkpoint_caches(void);
1230
1231/*
1232 * data.c
1233 */
1234void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
1235int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int);
1236void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t,
1237						struct f2fs_io_info *);
1238int reserve_new_block(struct dnode_of_data *);
1239int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
1240void update_extent_cache(block_t, struct dnode_of_data *);
1241struct page *find_data_page(struct inode *, pgoff_t, bool);
1242struct page *get_lock_data_page(struct inode *, pgoff_t);
1243struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
1244int do_write_data_page(struct page *, struct f2fs_io_info *);
1245
1246/*
1247 * gc.c
1248 */
1249int start_gc_thread(struct f2fs_sb_info *);
1250void stop_gc_thread(struct f2fs_sb_info *);
1251block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
1252int f2fs_gc(struct f2fs_sb_info *);
1253void build_gc_manager(struct f2fs_sb_info *);
1254int __init create_gc_caches(void);
1255void destroy_gc_caches(void);
1256
1257/*
1258 * recovery.c
1259 */
1260int recover_fsync_data(struct f2fs_sb_info *);
1261bool space_for_roll_forward(struct f2fs_sb_info *);
1262
1263/*
1264 * debug.c
1265 */
1266#ifdef CONFIG_F2FS_STAT_FS
1267struct f2fs_stat_info {
1268	struct list_head stat_list;
1269	struct f2fs_sb_info *sbi;
1270	struct mutex stat_lock;
1271	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
1272	int main_area_segs, main_area_sections, main_area_zones;
1273	int hit_ext, total_ext;
1274	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
1275	int nats, sits, fnids;
1276	int total_count, utilization;
1277	int bg_gc, inline_inode;
1278	unsigned int valid_count, valid_node_count, valid_inode_count;
1279	unsigned int bimodal, avg_vblocks;
1280	int util_free, util_valid, util_invalid;
1281	int rsvd_segs, overp_segs;
1282	int dirty_count, node_pages, meta_pages;
1283	int prefree_count, call_count, cp_count;
1284	int tot_segs, node_segs, data_segs, free_segs, free_secs;
1285	int tot_blks, data_blks, node_blks;
1286	int curseg[NR_CURSEG_TYPE];
1287	int cursec[NR_CURSEG_TYPE];
1288	int curzone[NR_CURSEG_TYPE];
1289
1290	unsigned int segment_count[2];
1291	unsigned int block_count[2];
1292	unsigned base_mem, cache_mem;
1293};
1294
1295static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
1296{
1297	return (struct f2fs_stat_info *)sbi->stat_info;
1298}
1299
1300#define stat_inc_cp_count(si)		((si)->cp_count++)
1301#define stat_inc_call_count(si)		((si)->call_count++)
1302#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
1303#define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
1304#define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
1305#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
1306#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
1307#define stat_inc_inline_inode(inode)					\
1308	do {								\
1309		if (f2fs_has_inline_data(inode))			\
1310			((F2FS_SB(inode->i_sb))->inline_inode++);	\
1311	} while (0)
1312#define stat_dec_inline_inode(inode)					\
1313	do {								\
1314		if (f2fs_has_inline_data(inode))			\
1315			((F2FS_SB(inode->i_sb))->inline_inode--);	\
1316	} while (0)
1317
1318#define stat_inc_seg_type(sbi, curseg)					\
1319		((sbi)->segment_count[(curseg)->alloc_type]++)
1320#define stat_inc_block_count(sbi, curseg)				\
1321		((sbi)->block_count[(curseg)->alloc_type]++)
1322
1323#define stat_inc_seg_count(sbi, type)					\
1324	do {								\
1325		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
1326		(si)->tot_segs++;					\
1327		if (type == SUM_TYPE_DATA)				\
1328			si->data_segs++;				\
1329		else							\
1330			si->node_segs++;				\
1331	} while (0)
1332
1333#define stat_inc_tot_blk_count(si, blks)				\
1334	(si->tot_blks += (blks))
1335
1336#define stat_inc_data_blk_count(sbi, blks)				\
1337	do {								\
1338		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
1339		stat_inc_tot_blk_count(si, blks);			\
1340		si->data_blks += (blks);				\
1341	} while (0)
1342
1343#define stat_inc_node_blk_count(sbi, blks)				\
1344	do {								\
1345		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
1346		stat_inc_tot_blk_count(si, blks);			\
1347		si->node_blks += (blks);				\
1348	} while (0)
1349
1350int f2fs_build_stats(struct f2fs_sb_info *);
1351void f2fs_destroy_stats(struct f2fs_sb_info *);
1352void __init f2fs_create_root_stats(void);
1353void f2fs_destroy_root_stats(void);
1354#else
1355#define stat_inc_cp_count(si)
1356#define stat_inc_call_count(si)
1357#define stat_inc_bggc_count(si)
1358#define stat_inc_dirty_dir(sbi)
1359#define stat_dec_dirty_dir(sbi)
1360#define stat_inc_total_hit(sb)
1361#define stat_inc_read_hit(sb)
1362#define stat_inc_inline_inode(inode)
1363#define stat_dec_inline_inode(inode)
1364#define stat_inc_seg_type(sbi, curseg)
1365#define stat_inc_block_count(sbi, curseg)
1366#define stat_inc_seg_count(si, type)
1367#define stat_inc_tot_blk_count(si, blks)
1368#define stat_inc_data_blk_count(si, blks)
1369#define stat_inc_node_blk_count(sbi, blks)
1370
1371static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
1372static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
1373static inline void __init f2fs_create_root_stats(void) { }
1374static inline void f2fs_destroy_root_stats(void) { }
1375#endif
1376
1377extern const struct file_operations f2fs_dir_operations;
1378extern const struct file_operations f2fs_file_operations;
1379extern const struct inode_operations f2fs_file_inode_operations;
1380extern const struct address_space_operations f2fs_dblock_aops;
1381extern const struct address_space_operations f2fs_node_aops;
1382extern const struct address_space_operations f2fs_meta_aops;
1383extern const struct inode_operations f2fs_dir_inode_operations;
1384extern const struct inode_operations f2fs_symlink_inode_operations;
1385extern const struct inode_operations f2fs_special_inode_operations;
1386
1387/*
1388 * inline.c
1389 */
1390bool f2fs_may_inline(struct inode *);
1391int f2fs_read_inline_data(struct inode *, struct page *);
1392int f2fs_convert_inline_data(struct inode *, pgoff_t);
1393int f2fs_write_inline_data(struct inode *, struct page *, unsigned int);
1394int recover_inline_data(struct inode *, struct page *);
1395#endif
v4.10.11
   1/*
   2 * fs/f2fs/f2fs.h
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#ifndef _LINUX_F2FS_H
  12#define _LINUX_F2FS_H
  13
  14#include <linux/types.h>
  15#include <linux/page-flags.h>
  16#include <linux/buffer_head.h>
  17#include <linux/slab.h>
  18#include <linux/crc32.h>
  19#include <linux/magic.h>
  20#include <linux/kobject.h>
  21#include <linux/sched.h>
  22#include <linux/vmalloc.h>
  23#include <linux/bio.h>
  24#include <linux/blkdev.h>
  25#include <linux/fscrypto.h>
  26#include <crypto/hash.h>
  27
  28#ifdef CONFIG_F2FS_CHECK_FS
  29#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
  30#else
  31#define f2fs_bug_on(sbi, condition)					\
  32	do {								\
  33		if (unlikely(condition)) {				\
  34			WARN_ON(1);					\
  35			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
  36		}							\
  37	} while (0)
  38#endif
  39
  40#ifdef CONFIG_F2FS_FAULT_INJECTION
  41enum {
  42	FAULT_KMALLOC,
  43	FAULT_PAGE_ALLOC,
  44	FAULT_ALLOC_NID,
  45	FAULT_ORPHAN,
  46	FAULT_BLOCK,
  47	FAULT_DIR_DEPTH,
  48	FAULT_EVICT_INODE,
  49	FAULT_IO,
  50	FAULT_CHECKPOINT,
  51	FAULT_MAX,
  52};
  53
  54struct f2fs_fault_info {
  55	atomic_t inject_ops;
  56	unsigned int inject_rate;
  57	unsigned int inject_type;
  58};
  59
  60extern char *fault_name[FAULT_MAX];
  61#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
  62#endif
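/*
 * Editor's sketch (not part of the original header): inject_type is a bitmask
 * indexed by the fault enum above, so enabling and testing a fault class
 * looks roughly like this (the values are illustrative only):
 *
 *	fi->inject_rate = 1000;		// frequency knob read by the injector
 *	fi->inject_type = (1 << FAULT_KMALLOC) | (1 << FAULT_PAGE_ALLOC);
 *
 *	if (IS_FAULT_SET(fi, FAULT_KMALLOC))
 *		;			// this fault class is enabled
 */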
  63
  64/*
  65 * For mount options
  66 */
  67#define F2FS_MOUNT_BG_GC		0x00000001
  68#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
  69#define F2FS_MOUNT_DISCARD		0x00000004
  70#define F2FS_MOUNT_NOHEAP		0x00000008
  71#define F2FS_MOUNT_XATTR_USER		0x00000010
  72#define F2FS_MOUNT_POSIX_ACL		0x00000020
  73#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
  74#define F2FS_MOUNT_INLINE_XATTR		0x00000080
  75#define F2FS_MOUNT_INLINE_DATA		0x00000100
  76#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
  77#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
  78#define F2FS_MOUNT_NOBARRIER		0x00000800
  79#define F2FS_MOUNT_FASTBOOT		0x00001000
  80#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
  81#define F2FS_MOUNT_FORCE_FG_GC		0x00004000
  82#define F2FS_MOUNT_DATA_FLUSH		0x00008000
  83#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
  84#define F2FS_MOUNT_ADAPTIVE		0x00020000
  85#define F2FS_MOUNT_LFS			0x00040000
  86
  87#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
  88#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
  89#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)
  90
  91#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
  92		typecheck(unsigned long long, b) &&			\
  93		((long long)((a) - (b)) > 0))
  94
  95typedef u32 block_t;	/*
  96			 * should not change u32, since it is the on-disk block
  97			 * address format, __le32.
  98			 */
  99typedef u32 nid_t;
 100
 101struct f2fs_mount_info {
 102	unsigned int	opt;
 103};
 104
 105#define F2FS_FEATURE_ENCRYPT	0x0001
 106#define F2FS_FEATURE_BLKZONED	0x0002
 107
 108#define F2FS_HAS_FEATURE(sb, mask)					\
 109	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
 110#define F2FS_SET_FEATURE(sb, mask)					\
 111	F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask)
 112#define F2FS_CLEAR_FEATURE(sb, mask)					\
 113	F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)
 115/*
 116 * For checkpoint manager
 117 */
 118enum {
 119	NAT_BITMAP,
 120	SIT_BITMAP
 121};
 122
 123enum {
 124	CP_UMOUNT,
 125	CP_FASTBOOT,
 126	CP_SYNC,
 127	CP_RECOVERY,
 128	CP_DISCARD,
 129};
 130
 131#define DEF_BATCHED_TRIM_SECTIONS	2
 132#define BATCHED_TRIM_SEGMENTS(sbi)	\
 133		(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
 134#define BATCHED_TRIM_BLOCKS(sbi)	\
 135		(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
 136#define DEF_CP_INTERVAL			60	/* 60 secs */
 137#define DEF_IDLE_INTERVAL		5	/* 5 secs */
 138
 139struct cp_control {
 140	int reason;
 141	__u64 trim_start;
 142	__u64 trim_end;
 143	__u64 trim_minlen;
 144	__u64 trimmed;
 145};
 146
 147/*
 148 * For CP/NAT/SIT/SSA readahead
 149 */
 150enum {
 151	META_CP,
 152	META_NAT,
 153	META_SIT,
 154	META_SSA,
 155	META_POR,
 156};
 157
 158/* for the list of ino */
 159enum {
 160	ORPHAN_INO,		/* for orphan ino list */
 161	APPEND_INO,		/* for append ino list */
 162	UPDATE_INO,		/* for update ino list */
 163	MAX_INO_ENTRY,		/* max. list */
 164};
 165
 166struct ino_entry {
 167	struct list_head list;	/* list head */
 168	nid_t ino;		/* inode number */
 169};
 170
 171/* for the list of inodes to be GCed */
 172struct inode_entry {
 173	struct list_head list;	/* list head */
 174	struct inode *inode;	/* vfs inode pointer */
 175};
 176
 177/* for the list of blockaddresses to be discarded */
 178struct discard_entry {
 179	struct list_head list;	/* list head */
 180	block_t blkaddr;	/* block address to be discarded */
 181	int len;		/* # of consecutive blocks of the discard */
 182};
 183
 184struct bio_entry {
 185	struct list_head list;
 186	struct bio *bio;
 187	struct completion event;
 188	int error;
 189};
 190
 191/* for the list of fsync inodes, used only during recovery */
 192struct fsync_inode_entry {
 193	struct list_head list;	/* list head */
 194	struct inode *inode;	/* vfs inode pointer */
 195	block_t blkaddr;	/* block address locating the last fsync */
 196	block_t last_dentry;	/* block address locating the last dentry */
 197};
 198
 199#define nats_in_cursum(jnl)		(le16_to_cpu(jnl->n_nats))
 200#define sits_in_cursum(jnl)		(le16_to_cpu(jnl->n_sits))
 201
 202#define nat_in_journal(jnl, i)		(jnl->nat_j.entries[i].ne)
 203#define nid_in_journal(jnl, i)		(jnl->nat_j.entries[i].nid)
 204#define sit_in_journal(jnl, i)		(jnl->sit_j.entries[i].se)
 205#define segno_in_journal(jnl, i)	(jnl->sit_j.entries[i].segno)
 206
 207#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
 208#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
 209
 210static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
 211{
 212	int before = nats_in_cursum(journal);
 213	journal->n_nats = cpu_to_le16(before + i);
 214	return before;
 215}
 216
 217static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
 218{
 219	int before = sits_in_cursum(journal);
 220	journal->n_sits = cpu_to_le16(before + i);
 221	return before;
 222}
 223
 224static inline bool __has_cursum_space(struct f2fs_journal *journal,
 225							int size, int type)
 226{
 227	if (type == NAT_JOURNAL)
 228		return size <= MAX_NAT_JENTRIES(journal);
 229	return size <= MAX_SIT_JENTRIES(journal);
 230}
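/*
 * Illustrative sketch (not from the original source): callers are expected
 * to check for journal space before queueing a new entry, e.g.
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		i = update_nats_in_cursum(journal, 1);
 *	else
 *		flush_journal_entries();
 *
 * where flush_journal_entries() stands in for whatever path writes the
 * overflowing entries back to the NAT/SIT area.
 */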
 231
 232/*
 233 * ioctl commands
 234 */
 235#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
 236#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS
 237#define F2FS_IOC_GETVERSION		FS_IOC_GETVERSION
 238
 239#define F2FS_IOCTL_MAGIC		0xf5
 240#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
 241#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
 242#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
 243#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
 244#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
 245#define F2FS_IOC_GARBAGE_COLLECT	_IO(F2FS_IOCTL_MAGIC, 6)
 246#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
 247#define F2FS_IOC_DEFRAGMENT		_IO(F2FS_IOCTL_MAGIC, 8)
 248#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
 249						struct f2fs_move_range)
 250
 251#define F2FS_IOC_SET_ENCRYPTION_POLICY	FS_IOC_SET_ENCRYPTION_POLICY
 252#define F2FS_IOC_GET_ENCRYPTION_POLICY	FS_IOC_GET_ENCRYPTION_POLICY
 253#define F2FS_IOC_GET_ENCRYPTION_PWSALT	FS_IOC_GET_ENCRYPTION_PWSALT
 254
 255/*
 256 * should be the same as XFS_IOC_GOINGDOWN.
 257 * Flags for the going-down operation used by FS_IOC_GOINGDOWN
 258 */
 259#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
 260#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
 261#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
 262#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
 263#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
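/*
 * Illustrative user-space sketch (assumption, not part of this header):
 * the shutdown ioctl takes one of the F2FS_GOING_DOWN_* values, e.g.
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 *
 * with fd referring to a file on the mounted f2fs filesystem.
 */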
 264
 265#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 266/*
 267 * ioctl commands in 32 bit emulation
 268 */
 269#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
 270#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
 271#define F2FS_IOC32_GETVERSION		FS_IOC32_GETVERSION
 272#endif
 273
 274struct f2fs_defragment {
 275	u64 start;
 276	u64 len;
 277};
 278
 279struct f2fs_move_range {
 280	u32 dst_fd;		/* destination fd */
 281	u64 pos_in;		/* start position in src_fd */
 282	u64 pos_out;		/* start position in dst_fd */
 283	u64 len;		/* size to move */
 284};
 285
 286/*
 287 * For INODE and NODE manager
 288 */
 289/* for directory operations */
 290struct f2fs_dentry_ptr {
 291	struct inode *inode;
 292	const void *bitmap;
 293	struct f2fs_dir_entry *dentry;
 294	__u8 (*filename)[F2FS_SLOT_LEN];
 295	int max;
 296};
 297
 298static inline void make_dentry_ptr(struct inode *inode,
 299		struct f2fs_dentry_ptr *d, void *src, int type)
 300{
 301	d->inode = inode;
 302
 303	if (type == 1) {
 304		struct f2fs_dentry_block *t = (struct f2fs_dentry_block *)src;
 305		d->max = NR_DENTRY_IN_BLOCK;
 306		d->bitmap = &t->dentry_bitmap;
 307		d->dentry = t->dentry;
 308		d->filename = t->filename;
 309	} else {
 310		struct f2fs_inline_dentry *t = (struct f2fs_inline_dentry *)src;
 311		d->max = NR_INLINE_DENTRY;
 312		d->bitmap = &t->dentry_bitmap;
 313		d->dentry = t->dentry;
 314		d->filename = t->filename;
 315	}
 316}
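/*
 * Illustrative sketch: a regular dentry block read from a directory would
 * typically be wrapped as
 *
 *	struct f2fs_dentry_ptr d;
 *	make_dentry_ptr(dir, &d, page_address(dentry_page), 1);
 *
 * after which d.max, d.bitmap, d.dentry and d.filename describe the block
 * uniformly for both the regular (type == 1) and inline layouts.  The
 * dentry_page variable is assumed here for illustration.
 */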
 317
 318/*
 319 * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
 320 * as its node offset to distinguish it from index node blocks.
 321 * But some bits are used to mark the node block, hence the masking below.
 322 */
 323#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
 324				>> OFFSET_BIT_SHIFT)
 325enum {
 326	ALLOC_NODE,			/* allocate a new node page if needed */
 327	LOOKUP_NODE,			/* look up a node without readahead */
 328	LOOKUP_NODE_RA,			/*
 329					 * look up a node with readahead called
 330					 * by get_data_block.
 331					 */
 332};
 333
 334#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
 335
 336#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 337
 338/* vector size for gang look-up from extent cache that consists of radix tree */
 339#define EXT_TREE_VEC_SIZE	64
 340
 341/* for in-memory extent cache entry */
 342#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
 343
 344/* number of extent info in extent cache we try to shrink */
 345#define EXTENT_CACHE_SHRINK_NUMBER	128
 346
 347struct extent_info {
 348	unsigned int fofs;		/* start offset in a file */
 349	u32 blk;			/* start block address of the extent */
 350	unsigned int len;		/* length of the extent */
 351};
 352
 353struct extent_node {
 354	struct rb_node rb_node;		/* rb node located in rb-tree */
 355	struct list_head list;		/* node in global extent list of sbi */
 356	struct extent_info ei;		/* extent info */
 357	struct extent_tree *et;		/* extent tree pointer */
 358};
 359
 360struct extent_tree {
 361	nid_t ino;			/* inode number */
 362	struct rb_root root;		/* root of extent info rb-tree */
 363	struct extent_node *cached_en;	/* recently accessed extent node */
 364	struct extent_info largest;	/* largest extent info */
 365	struct list_head list;		/* to be used by sbi->zombie_list */
 366	rwlock_t lock;			/* protect extent info rb-tree */
 367	atomic_t node_cnt;		/* # of extent node in rb-tree*/
 368};
 369
 370/*
 371 * This structure is taken from ext4_map_blocks.
 372 *
 373 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 374 */
 375#define F2FS_MAP_NEW		(1 << BH_New)
 376#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
 377#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
 378#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
 379				F2FS_MAP_UNWRITTEN)
 380
 381struct f2fs_map_blocks {
 382	block_t m_pblk;
 383	block_t m_lblk;
 384	unsigned int m_len;
 385	unsigned int m_flags;
 386	pgoff_t *m_next_pgofs;		/* points to the next possible non-hole pgofs */
 387};
 388
 389/* for flag in get_data_block */
 390#define F2FS_GET_BLOCK_READ		0
 391#define F2FS_GET_BLOCK_DIO		1
 392#define F2FS_GET_BLOCK_FIEMAP		2
 393#define F2FS_GET_BLOCK_BMAP		3
 394#define F2FS_GET_BLOCK_PRE_DIO		4
 395#define F2FS_GET_BLOCK_PRE_AIO		5
 396
 397/*
 398 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 399 */
 400#define FADVISE_COLD_BIT	0x01
 401#define FADVISE_LOST_PINO_BIT	0x02
 402#define FADVISE_ENCRYPT_BIT	0x04
 403#define FADVISE_ENC_NAME_BIT	0x08
 404#define FADVISE_KEEP_SIZE_BIT	0x10
 405
 406#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
 407#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
 408#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
 409#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
 410#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
 411#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
 412#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
 413#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
 414#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
 415#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
 416#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
 417#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
 418#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
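/*
 * Example (illustrative): the helpers above toggle single bits in i_advise,
 * so after
 *
 *	file_set_cold(inode);
 *	file_set_encrypt(inode);
 *
 * i_advise holds FADVISE_COLD_BIT | FADVISE_ENCRYPT_BIT (0x05) and
 * file_is_cold(inode) evaluates to a non-zero value.
 */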
 419
 420#define DEF_DIR_LEVEL		0
 421
 422struct f2fs_inode_info {
 423	struct inode vfs_inode;		/* serve a vfs inode */
 424	unsigned long i_flags;		/* keep an inode flags for ioctl */
 425	unsigned char i_advise;		/* use to give file attribute hints */
 426	unsigned char i_dir_level;	/* use for dentry level for large dir */
 427	unsigned int i_current_depth;	/* use only in directory structure */
 428	unsigned int i_pino;		/* parent inode number */
 429	umode_t i_acl_mode;		/* keep file acl mode temporarily */
 430
 431	/* Used below internally in f2fs */
 432	unsigned long flags;		/* use to pass per-file flags */
 433	struct rw_semaphore i_sem;	/* protect fi info */
 434	atomic_t dirty_pages;		/* # of dirty pages */
 435	f2fs_hash_t chash;		/* hash value of given file name */
 436	unsigned int clevel;		/* maximum level of given file name */
 437	struct task_struct *task;	/* lookup and create consistency */
 438	nid_t i_xattr_nid;		/* node id that contains xattrs */
 439	unsigned long long xattr_ver;	/* cp version of xattr modification */
 440	loff_t	last_disk_size;		/* last written file size */
 441
 442	struct list_head dirty_list;	/* dirty list for dirs and files */
 443	struct list_head gdirty_list;	/* linked in global dirty list */
 444	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
 445	struct mutex inmem_lock;	/* lock for inmemory pages */
 446	struct extent_tree *extent_tree;	/* cached extent_tree entry */
 447	struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
 448};
 449
 450static inline void get_extent_info(struct extent_info *ext,
 451					struct f2fs_extent *i_ext)
 452{
 453	ext->fofs = le32_to_cpu(i_ext->fofs);
 454	ext->blk = le32_to_cpu(i_ext->blk);
 455	ext->len = le32_to_cpu(i_ext->len);
 456}
 457
 458static inline void set_raw_extent(struct extent_info *ext,
 459					struct f2fs_extent *i_ext)
 460{
 461	i_ext->fofs = cpu_to_le32(ext->fofs);
 462	i_ext->blk = cpu_to_le32(ext->blk);
 463	i_ext->len = cpu_to_le32(ext->len);
 464}
 465
 466static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
 467						u32 blk, unsigned int len)
 468{
 469	ei->fofs = fofs;
 470	ei->blk = blk;
 471	ei->len = len;
 472}
 473
 474static inline bool __is_extent_same(struct extent_info *ei1,
 475						struct extent_info *ei2)
 476{
 477	return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk &&
 478						ei1->len == ei2->len);
 479}
 480
 481static inline bool __is_extent_mergeable(struct extent_info *back,
 482						struct extent_info *front)
 483{
 484	return (back->fofs + back->len == front->fofs &&
 485			back->blk + back->len == front->blk);
 486}
 487
 488static inline bool __is_back_mergeable(struct extent_info *cur,
 489						struct extent_info *back)
 490{
 491	return __is_extent_mergeable(back, cur);
 492}
 493
 494static inline bool __is_front_mergeable(struct extent_info *cur,
 495						struct extent_info *front)
 496{
 497	return __is_extent_mergeable(cur, front);
 498}
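/*
 * Worked example (illustrative): extents {fofs = 100, blk = 2000, len = 8}
 * and {fofs = 108, blk = 2008, len = 4} are mergeable because
 * 100 + 8 == 108 and 2000 + 8 == 2008, i.e. both the file offsets and the
 * block addresses are contiguous; if only one of the two conditions held,
 * __is_extent_mergeable() would return false.
 */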
 499
 500extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
 501static inline void __try_update_largest_extent(struct inode *inode,
 502			struct extent_tree *et, struct extent_node *en)
 503{
 504	if (en->ei.len > et->largest.len) {
 505		et->largest = en->ei;
 506		f2fs_mark_inode_dirty_sync(inode, true);
 507	}
 508}
 509
 510enum nid_list {
 511	FREE_NID_LIST,
 512	ALLOC_NID_LIST,
 513	MAX_NID_LIST,
 514};
 515
 516struct f2fs_nm_info {
 517	block_t nat_blkaddr;		/* base disk address of NAT */
 518	nid_t max_nid;			/* maximum possible node ids */
 519	nid_t available_nids;		/* # of available node ids */
 520	nid_t next_scan_nid;		/* the next nid to be scanned */
 521	unsigned int ram_thresh;	/* control the memory footprint */
 522	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
 523	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */
 524
 525	/* NAT cache management */
 526	struct radix_tree_root nat_root;/* root of the nat entry cache */
 527	struct radix_tree_root nat_set_root;/* root of the nat set cache */
 528	struct rw_semaphore nat_tree_lock;	/* protect the nat entry caches */
 529	struct list_head nat_entries;	/* cached nat entry list (clean) */
 530	unsigned int nat_cnt;		/* the # of cached nat entries */
 531	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
 532
 533	/* free node ids management */
 534	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
 535	struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
 536	unsigned int nid_cnt[MAX_NID_LIST];	/* the number of free node id */
 537	spinlock_t nid_list_lock;	/* protect nid lists ops */
 538	struct mutex build_lock;	/* lock for build free nids */
 539
 540	/* for checkpoint */
 541	char *nat_bitmap;		/* NAT bitmap pointer */
 542	int bitmap_size;		/* bitmap size */
 543};
 544
 545/*
 546 * this structure is used as a function parameter.
 547 * all the information is dedicated to a given direct node block determined
 548 * by the data offset in a file.
 549 */
 550struct dnode_of_data {
 551	struct inode *inode;		/* vfs inode pointer */
 552	struct page *inode_page;	/* its inode page, NULL is possible */
 553	struct page *node_page;		/* cached direct node page */
 554	nid_t nid;			/* node id of the direct node block */
 555	unsigned int ofs_in_node;	/* data offset in the node page */
 556	bool inode_page_locked;		/* inode page is locked or not */
 557	bool node_changed;		/* is node block changed */
 558	char cur_level;			/* level of hole node page */
 559	char max_level;			/* level of current page located */
 560	block_t	data_blkaddr;		/* block address of the data block */
 561};
 562
 563static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
 564		struct page *ipage, struct page *npage, nid_t nid)
 565{
 566	memset(dn, 0, sizeof(*dn));
 567	dn->inode = inode;
 568	dn->inode_page = ipage;
 569	dn->node_page = npage;
 570	dn->nid = nid;
 571}
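/*
 * Illustrative sketch: the usual calling pattern is to zero the dnode
 * descriptor and let the node manager fill it in, e.g.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	(look up or allocate the node page for the wanted block offset)
 *	f2fs_put_dnode(&dn);
 *
 * The lookup step is elided because those helpers are declared elsewhere
 * in this header.
 */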
 572
 573/*
 574 * For SIT manager
 575 *
 576 * By default, there are 6 active log areas across the whole main area.
 577 * When considering hot and cold data separation to reduce cleaning overhead,
 578 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 579 * respectively.
 580 * In the current design, you should not change these numbers directly.
 581 * Instead, the active_logs=x mount option can select 2, 4, or 6 logs
 582 * according to the underlying devices. (default: 6)
 583 * Just in case, the on-disk layout covers a maximum of 16 logs: 8 for
 584 * data and 8 for node logs.
 585 */
 586#define	NR_CURSEG_DATA_TYPE	(3)
 587#define NR_CURSEG_NODE_TYPE	(3)
 588#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
 589
 590enum {
 591	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
 592	CURSEG_WARM_DATA,	/* data blocks */
 593	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
 594	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
 595	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
 596	CURSEG_COLD_NODE,	/* indirect node blocks */
 597	NO_CHECK_TYPE,
 598};
 599
 600struct flush_cmd {
 601	struct completion wait;
 602	struct llist_node llnode;
 603	int ret;
 604};
 605
 606struct flush_cmd_control {
 607	struct task_struct *f2fs_issue_flush;	/* flush thread */
 608	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
 609	atomic_t submit_flush;			/* # of issued flushes */
 610	struct llist_head issue_list;		/* list for command issue */
 611	struct llist_node *dispatch_list;	/* list for command dispatch */
 612};
 613
 614struct f2fs_sm_info {
 615	struct sit_info *sit_info;		/* whole segment information */
 616	struct free_segmap_info *free_info;	/* free segment information */
 617	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
 618	struct curseg_info *curseg_array;	/* active segment information */
 619
 620	block_t seg0_blkaddr;		/* block address of 0'th segment */
 621	block_t main_blkaddr;		/* start block address of main area */
 622	block_t ssa_blkaddr;		/* start block address of SSA area */
 623
 624	unsigned int segment_count;	/* total # of segments */
 625	unsigned int main_segments;	/* # of segments in main area */
 626	unsigned int reserved_segments;	/* # of reserved segments */
 627	unsigned int ovp_segments;	/* # of overprovision segments */
 628
 629	/* a threshold to reclaim prefree segments */
 630	unsigned int rec_prefree_segments;
 631
 632	/* for small discard management */
 633	struct list_head discard_list;		/* 4KB discard list */
 634	struct list_head wait_list;		/* linked with issued discard bio */
 635	int nr_discards;			/* # of discards in the list */
 636	int max_discards;			/* max. discards to be issued */
 637
 638	/* for batched trimming */
 639	unsigned int trim_sections;		/* # of sections to trim */
 640
 641	struct list_head sit_entry_set;	/* sit entry set list */
 642
 643	unsigned int ipu_policy;	/* in-place-update policy */
 644	unsigned int min_ipu_util;	/* in-place-update threshold */
 645	unsigned int min_fsync_blocks;	/* threshold for fsync */
 646
 647	/* for flush command control */
 648	struct flush_cmd_control *cmd_control_info;
 649
 650};
 651
 652/*
 653 * For superblock
 654 */
 655/*
 656 * COUNT_TYPE for monitoring
 657 *
 658 * f2fs monitors several kinds of block counts, such as pages under
 659 * writeback, dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 660 */
 661#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
 662enum count_type {
 663	F2FS_DIRTY_DENTS,
 664	F2FS_DIRTY_DATA,
 665	F2FS_DIRTY_NODES,
 666	F2FS_DIRTY_META,
 667	F2FS_INMEM_PAGES,
 668	F2FS_DIRTY_IMETA,
 669	F2FS_WB_CP_DATA,
 670	F2FS_WB_DATA,
 671	NR_COUNT_TYPE,
 672};
 673
 674/*
 675 * The below are the page types of bios used in submit_bio().
 676 * The available types are:
 677 * DATA			User data pages. It operates in async mode.
 678 * NODE			Node pages. It operates in async mode.
 679 * META			FS metadata pages such as SIT, NAT, CP.
 680 * NR_PAGE_TYPE		The number of page types.
 681 * META_FLUSH		Make sure the previous pages are written
 682 *			by waiting for the bio's completion.
 683 * ...			Can only be used with META.
 684 */
 685#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
 686enum page_type {
 687	DATA,
 688	NODE,
 689	META,
 690	NR_PAGE_TYPE,
 691	META_FLUSH,
 692	INMEM,		/* the below types are used by tracepoints only. */
 693	INMEM_DROP,
 694	INMEM_REVOKE,
 695	IPU,
 696	OPU,
 697};
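/*
 * Example (illustrative): PAGE_TYPE_OF_BIO() folds the pseudo types back to
 * a real bio type, so PAGE_TYPE_OF_BIO(META_FLUSH) == META while
 * PAGE_TYPE_OF_BIO(DATA) == DATA; the INMEM/IPU/OPU values are only used by
 * tracepoints and never reach submit_bio().
 */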
 698
 699struct f2fs_io_info {
 700	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
 701	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
 702	int op;			/* contains REQ_OP_ */
 703	int op_flags;		/* req_flag_bits */
 704	block_t new_blkaddr;	/* new block address to be written */
 705	block_t old_blkaddr;	/* old block address before Cow */
 706	struct page *page;	/* page to be written */
 707	struct page *encrypted_page;	/* encrypted page */
 708};
 709
 710#define is_read_io(rw) (rw == READ)
 711struct f2fs_bio_info {
 712	struct f2fs_sb_info *sbi;	/* f2fs superblock */
 713	struct bio *bio;		/* bios to merge */
 714	sector_t last_block_in_bio;	/* last block number */
 715	struct f2fs_io_info fio;	/* store buffered io info. */
 716	struct rw_semaphore io_rwsem;	/* blocking op for bio */
 717};
 718
 719#define FDEV(i)				(sbi->devs[i])
 720#define RDEV(i)				(raw_super->devs[i])
 721struct f2fs_dev_info {
 722	struct block_device *bdev;
 723	char path[MAX_PATH_LEN];
 724	unsigned int total_segments;
 725	block_t start_blk;
 726	block_t end_blk;
 727#ifdef CONFIG_BLK_DEV_ZONED
 728	unsigned int nr_blkz;			/* Total number of zones */
 729	u8 *blkz_type;				/* Array of zones type */
 730#endif
 731};
 732
 733enum inode_type {
 734	DIR_INODE,			/* for dirty dir inode */
 735	FILE_INODE,			/* for dirty regular/symlink inode */
 736	DIRTY_META,			/* for all dirtied inode metadata */
 737	NR_INODE_TYPE,
 738};
 739
 740/* for inner inode cache management */
 741struct inode_management {
 742	struct radix_tree_root ino_root;	/* ino entry array */
 743	spinlock_t ino_lock;			/* for ino entry lock */
 744	struct list_head ino_list;		/* inode list head */
 745	unsigned long ino_num;			/* number of entries */
 746};
 747
 748/* For s_flag in struct f2fs_sb_info */
 749enum {
 750	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
 751	SBI_IS_CLOSE,				/* specify unmounting */
 752	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
 753	SBI_POR_DOING,				/* recovery is in progress */
 754	SBI_NEED_SB_WRITE,			/* need to recover superblock */
 755	SBI_NEED_CP,				/* need to checkpoint */
 756};
 757
 758enum {
 759	CP_TIME,
 760	REQ_TIME,
 761	MAX_TIME,
 762};
 763
 764#ifdef CONFIG_F2FS_FS_ENCRYPTION
 765#define F2FS_KEY_DESC_PREFIX "f2fs:"
 766#define F2FS_KEY_DESC_PREFIX_SIZE 5
 767#endif
 768struct f2fs_sb_info {
 769	struct super_block *sb;			/* pointer to VFS super block */
 770	struct proc_dir_entry *s_proc;		/* proc entry */
 771	struct f2fs_super_block *raw_super;	/* raw super block pointer */
 772	int valid_super_block;			/* valid super block no */
 773	unsigned long s_flag;				/* flags for sbi */
 774
 775#ifdef CONFIG_F2FS_FS_ENCRYPTION
 776	u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
 777	u8 key_prefix_size;
 778#endif
 779
 780#ifdef CONFIG_BLK_DEV_ZONED
 781	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
 782	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
 783#endif
 784
 785	/* for node-related operations */
 786	struct f2fs_nm_info *nm_info;		/* node manager */
 787	struct inode *node_inode;		/* cache node blocks */
 788
 789	/* for segment-related operations */
 790	struct f2fs_sm_info *sm_info;		/* segment manager */
 791
 792	/* for bio operations */
 793	struct f2fs_bio_info read_io;			/* for read bios */
 794	struct f2fs_bio_info write_io[NR_PAGE_TYPE];	/* for write bios */
 795	struct mutex wio_mutex[NODE + 1];	/* bio ordering for NODE/DATA */
 796
 797	/* for checkpoint */
 798	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
 799	int cur_cp_pack;			/* currently used cp pack (1 or 2) */
 800	spinlock_t cp_lock;			/* for flag in ckpt */
 801	struct inode *meta_inode;		/* cache meta blocks */
 802	struct mutex cp_mutex;			/* checkpoint procedure lock */
 803	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
 804	struct rw_semaphore node_write;		/* locking node writes */
 805	wait_queue_head_t cp_wait;
 806	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
 807	long interval_time[MAX_TIME];		/* to store thresholds */
 808
 809	struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
 810
 811	/* for orphan inode, use 0'th array */
 812	unsigned int max_orphans;		/* max orphan inodes */
 813
 814	/* for inode management */
 815	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
 816	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
 817
 818	/* for extent tree cache */
 819	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
 820	struct rw_semaphore extent_tree_lock;	/* locking extent radix tree */
 821	struct list_head extent_list;		/* lru list for shrinker */
 822	spinlock_t extent_lock;			/* locking extent lru list */
 823	atomic_t total_ext_tree;		/* extent tree count */
 824	struct list_head zombie_list;		/* extent zombie tree list */
 825	atomic_t total_zombie_tree;		/* extent zombie tree count */
 826	atomic_t total_ext_node;		/* extent info count */
 827
 828	/* basic filesystem units */
 829	unsigned int log_sectors_per_block;	/* log2 sectors per block */
 830	unsigned int log_blocksize;		/* log2 block size */
 831	unsigned int blocksize;			/* block size */
 832	unsigned int root_ino_num;		/* root inode number*/
 833	unsigned int node_ino_num;		/* node inode number*/
 834	unsigned int meta_ino_num;		/* meta inode number*/
 835	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
 836	unsigned int blocks_per_seg;		/* blocks per segment */
 837	unsigned int segs_per_sec;		/* segments per section */
 838	unsigned int secs_per_zone;		/* sections per zone */
 839	unsigned int total_sections;		/* total section count */
 840	unsigned int total_node_count;		/* total node block count */
 841	unsigned int total_valid_node_count;	/* valid node block count */
 842	loff_t max_file_blocks;			/* max block index of file */
 843	int active_logs;			/* # of active logs */
 844	int dir_level;				/* directory level */
 845
 846	block_t user_block_count;		/* # of user blocks */
 847	block_t total_valid_block_count;	/* # of valid blocks */
 848	block_t discard_blks;			/* discard command candidates */
 849	block_t last_valid_block_count;		/* for recovery */
 850	u32 s_next_generation;			/* for NFS support */
 851
 852	/* # of pages, see count_type */
 853	atomic_t nr_pages[NR_COUNT_TYPE];
 854	/* # of allocated blocks */
 855	struct percpu_counter alloc_valid_block_count;
 856
 857	/* valid inode count */
 858	struct percpu_counter total_valid_inode_count;
 859
 860	struct f2fs_mount_info mount_opt;	/* mount options */
 861
 862	/* for cleaning operations */
 863	struct mutex gc_mutex;			/* mutex for GC */
 864	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 865	unsigned int cur_victim_sec;		/* current victim section num */
 866
 867	/* threshold for converting bg victims for fg */
 868	u64 fggc_threshold;
 869
 870	/* maximum # of trials to find a victim segment for SSR and GC */
 871	unsigned int max_victim_search;
 872
 873	/*
 874	 * for stat information.
 875	 * one is for the LFS mode, and the other is for the SSR mode.
 876	 */
 877#ifdef CONFIG_F2FS_STAT_FS
 878	struct f2fs_stat_info *stat_info;	/* FS status information */
 879	unsigned int segment_count[2];		/* # of allocated segments */
 880	unsigned int block_count[2];		/* # of allocated blocks */
 881	atomic_t inplace_count;		/* # of inplace update */
 882	atomic64_t total_hit_ext;		/* # of lookup extent cache */
 883	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
 884	atomic64_t read_hit_largest;		/* # of hit largest extent node */
 885	atomic64_t read_hit_cached;		/* # of hit cached extent node */
 886	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 887	atomic_t inline_inode;			/* # of inline_data inodes */
 888	atomic_t inline_dir;			/* # of inline_dentry inodes */
 889	int bg_gc;				/* background gc calls */
 890	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
 891#endif
 892	unsigned int last_victim[2];		/* last victim segment # */
 893	spinlock_t stat_lock;			/* lock for stat operations */
 894
 895	/* For sysfs support */
 896	struct kobject s_kobj;
 897	struct completion s_kobj_unregister;
 898
 899	/* For shrinker support */
 900	struct list_head s_list;
 901	int s_ndevs;				/* number of devices */
 902	struct f2fs_dev_info *devs;		/* for device list */
 903	struct mutex umount_mutex;
 904	unsigned int shrinker_run_no;
 905
 906	/* For write statistics */
 907	u64 sectors_written_start;
 908	u64 kbytes_written;
 909
 910	/* Reference to checksum algorithm driver via cryptoapi */
 911	struct crypto_shash *s_chksum_driver;
 912
 913	/* For fault injection */
 914#ifdef CONFIG_F2FS_FAULT_INJECTION
 915	struct f2fs_fault_info fault_info;
 916#endif
 917};
 918
 919#ifdef CONFIG_F2FS_FAULT_INJECTION
 920static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
 921{
 922	struct f2fs_fault_info *ffi = &sbi->fault_info;
 923
 924	if (!ffi->inject_rate)
 925		return false;
 926
 927	if (!IS_FAULT_SET(ffi, type))
 928		return false;
 929
 930	atomic_inc(&ffi->inject_ops);
 931	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
 932		atomic_set(&ffi->inject_ops, 0);
 933		printk("%sF2FS-fs : inject %s in %pF\n",
 934				KERN_INFO,
 935				fault_name[type],
 936				__builtin_return_address(0));
 937		return true;
 938	}
 939	return false;
 940}
 941#endif
 942
 943/* For write statistics. Suppose the sector size is 512 bytes,
 944 * and the return value is in kbytes. s is a struct f2fs_sb_info pointer.
 945 */
 946#define BD_PART_WRITTEN(s)						 \
 947(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) -		 \
 948		s->sectors_written_start) >> 1)
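/*
 * Worked example (illustrative): if part_stat_read() reports 10000 sectors
 * written and sectors_written_start was 2000, BD_PART_WRITTEN() yields
 * (10000 - 2000) >> 1 = 4000, i.e. the ">> 1" converts 512-byte sectors
 * into kilobytes.
 */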
 949
 950static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
 951{
 952	sbi->last_time[type] = jiffies;
 953}
 954
 955static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
 956{
 957	struct timespec ts = {sbi->interval_time[type], 0};
 958	unsigned long interval = timespec_to_jiffies(&ts);
 959
 960	return time_after(jiffies, sbi->last_time[type] + interval);
 961}
 962
 963static inline bool is_idle(struct f2fs_sb_info *sbi)
 964{
 965	struct block_device *bdev = sbi->sb->s_bdev;
 966	struct request_queue *q = bdev_get_queue(bdev);
 967	struct request_list *rl = &q->root_rl;
 968
 969	if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
 970		return 0;
 971
 972	return f2fs_time_over(sbi, REQ_TIME);
 973}
 974
 975/*
 976 * Inline functions
 977 */
 978static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 979			   unsigned int length)
 980{
 981	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
 982	u32 *ctx = (u32 *)shash_desc_ctx(shash);
 983	int err;
 984
 985	shash->tfm = sbi->s_chksum_driver;
 986	shash->flags = 0;
 987	*ctx = F2FS_SUPER_MAGIC;
 988
 989	err = crypto_shash_update(shash, address, length);
 990	BUG_ON(err);
 991
 992	return *ctx;
 993}
 994
 995static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
 996				  void *buf, size_t buf_size)
 997{
 998	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
 999}
1000
1001static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1002{
1003	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1004}
1005
1006static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1007{
1008	return sb->s_fs_info;
1009}
1010
1011static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1012{
1013	return F2FS_SB(inode->i_sb);
1014}
1015
1016static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1017{
1018	return F2FS_I_SB(mapping->host);
1019}
1020
1021static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1022{
1023	return F2FS_M_SB(page->mapping);
1024}
1025
1026static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1027{
1028	return (struct f2fs_super_block *)(sbi->raw_super);
1029}
1030
1031static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1032{
1033	return (struct f2fs_checkpoint *)(sbi->ckpt);
1034}
1035
1036static inline struct f2fs_node *F2FS_NODE(struct page *page)
1037{
1038	return (struct f2fs_node *)page_address(page);
1039}
1040
1041static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1042{
1043	return &((struct f2fs_node *)page_address(page))->i;
1044}
1045
1046static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1047{
1048	return (struct f2fs_nm_info *)(sbi->nm_info);
1049}
1050
1051static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1052{
1053	return (struct f2fs_sm_info *)(sbi->sm_info);
1054}
1055
1056static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1057{
1058	return (struct sit_info *)(SM_I(sbi)->sit_info);
1059}
1060
1061static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1062{
1063	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1064}
1065
1066static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1067{
1068	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1069}
1070
1071static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1072{
1073	return sbi->meta_inode->i_mapping;
1074}
1075
1076static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
1077{
1078	return sbi->node_inode->i_mapping;
1079}
1080
1081static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
1082{
1083	return test_bit(type, &sbi->s_flag);
1084}
1085
1086static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1087{
1088	set_bit(type, &sbi->s_flag);
1089}
1090
1091static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1092{
1093	clear_bit(type, &sbi->s_flag);
1094}
1095
1096static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
1097{
1098	return le64_to_cpu(cp->checkpoint_ver);
1099}
1100
1101static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1102{
1103	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1104
1105	return ckpt_flags & f;
1106}
1107
1108static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1109{
1110	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
1111}
1112
1113static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1114{
1115	unsigned int ckpt_flags;
1116
1117	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1118	ckpt_flags |= f;
1119	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1120}
1121
1122static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1123{
1124	spin_lock(&sbi->cp_lock);
1125	__set_ckpt_flags(F2FS_CKPT(sbi), f);
1126	spin_unlock(&sbi->cp_lock);
1127}
1128
1129static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1130{
1131	unsigned int ckpt_flags;
1132
1133	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1134	ckpt_flags &= (~f);
1135	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1136}
1137
1138static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1139{
1140	spin_lock(&sbi->cp_lock);
1141	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
1142	spin_unlock(&sbi->cp_lock);
1143}
1144
1145static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
1146{
1147	down_read(&sbi->cp_rwsem);
1148}
1149
1150static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
1151{
1152	up_read(&sbi->cp_rwsem);
1153}
1154
1155static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
1156{
1157	down_write(&sbi->cp_rwsem);
1158}
1159
1160static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
1161{
1162	up_write(&sbi->cp_rwsem);
1163}
1164
1165static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
1166{
1167	int reason = CP_SYNC;
1168
1169	if (test_opt(sbi, FASTBOOT))
1170		reason = CP_FASTBOOT;
1171	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
1172		reason = CP_UMOUNT;
1173	return reason;
1174}
1175
1176static inline bool __remain_node_summaries(int reason)
1177{
1178	return (reason == CP_UMOUNT || reason == CP_FASTBOOT);
1179}
1180
1181static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
1182{
1183	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
1184			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
1185}
1186
1187/*
1188 * Check whether the given nid is within node id range.
1189 */
1190static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
1191{
1192	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
1193		return -EINVAL;
1194	if (unlikely(nid >= NM_I(sbi)->max_nid))
1195		return -EINVAL;
1196	return 0;
1197}
1198
1199#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1
1200
1201/*
1202 * Check whether the inode has blocks or not
1203 */
1204static inline int F2FS_HAS_BLOCKS(struct inode *inode)
1205{
1206	if (F2FS_I(inode)->i_xattr_nid)
1207		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1;
1208	else
1209		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS;
1210}
1211
1212static inline bool f2fs_has_xattr_block(unsigned int ofs)
1213{
1214	return ofs == XATTR_NODE_OFFSET;
1215}
1216
1217static inline void f2fs_i_blocks_write(struct inode *, blkcnt_t, bool);
1218static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
1219				 struct inode *inode, blkcnt_t *count)
1220{
1221	blkcnt_t diff;
1222
1223#ifdef CONFIG_F2FS_FAULT_INJECTION
1224	if (time_to_inject(sbi, FAULT_BLOCK))
1225		return false;
1226#endif
1227	/*
1228	 * let's increase this in prior to actual block count change in order
1229	 * for f2fs_sync_file to avoid data races when deciding checkpoint.
1230	 */
1231	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
1232
1233	spin_lock(&sbi->stat_lock);
1234	sbi->total_valid_block_count += (block_t)(*count);
1235	if (unlikely(sbi->total_valid_block_count > sbi->user_block_count)) {
1236		diff = sbi->total_valid_block_count - sbi->user_block_count;
1237		*count -= diff;
1238		sbi->total_valid_block_count = sbi->user_block_count;
1239		if (!*count) {
1240			spin_unlock(&sbi->stat_lock);
1241			percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
1242			return false;
1243		}
1244	}
1245	spin_unlock(&sbi->stat_lock);
1246
1247	f2fs_i_blocks_write(inode, *count, true);
1248	return true;
1249}
1250
1251static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
1252						struct inode *inode,
1253						blkcnt_t count)
1254{
1255	spin_lock(&sbi->stat_lock);
1256	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
1257	f2fs_bug_on(sbi, inode->i_blocks < count);
1258	sbi->total_valid_block_count -= (block_t)count;
1259	spin_unlock(&sbi->stat_lock);
1260	f2fs_i_blocks_write(inode, count, false);
1261}
1262
1263static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
1264{
1265	atomic_inc(&sbi->nr_pages[count_type]);
1266
1267	if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
1268		count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
1269		return;
1270
1271	set_sbi_flag(sbi, SBI_IS_DIRTY);
1272}
1273
1274static inline void inode_inc_dirty_pages(struct inode *inode)
1275{
1276	atomic_inc(&F2FS_I(inode)->dirty_pages);
1277	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
1278				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
1279}
1280
1281static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
1282{
1283	atomic_dec(&sbi->nr_pages[count_type]);
1284}
1285
1286static inline void inode_dec_dirty_pages(struct inode *inode)
1287{
1288	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
1289			!S_ISLNK(inode->i_mode))
1290		return;
1291
1292	atomic_dec(&F2FS_I(inode)->dirty_pages);
1293	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
1294				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
1295}
1296
1297static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
1298{
1299	return atomic_read(&sbi->nr_pages[count_type]);
1300}
1301
1302static inline int get_dirty_pages(struct inode *inode)
1303{
1304	return atomic_read(&F2FS_I(inode)->dirty_pages);
1305}
1306
1307static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
1308{
1309	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
1310	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
1311						sbi->log_blocks_per_seg;
1312
1313	return segs / sbi->segs_per_sec;
1314}
1315
1316static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
1317{
1318	return sbi->total_valid_block_count;
1319}
1320
1321static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
1322{
1323	return sbi->discard_blks;
1324}
1325
1326static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
1327{
1328	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1329
1330	/* return NAT or SIT bitmap */
1331	if (flag == NAT_BITMAP)
1332		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
1333	else if (flag == SIT_BITMAP)
1334		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
1335
1336	return 0;
1337}
1338
1339static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
1340{
1341	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
1342}
1343
1344static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
1345{
1346	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1347	int offset;
1348
1349	if (__cp_payload(sbi) > 0) {
1350		if (flag == NAT_BITMAP)
1351			return &ckpt->sit_nat_version_bitmap;
1352		else
1353			return (unsigned char *)ckpt + F2FS_BLKSIZE;
1354	} else {
1355		offset = (flag == NAT_BITMAP) ?
1356			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
1357		return &ckpt->sit_nat_version_bitmap + offset;
1358	}
1359}
1360
1361static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
1362{
1363	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
1364
1365	if (sbi->cur_cp_pack == 2)
1366		start_addr += sbi->blocks_per_seg;
1367	return start_addr;
1368}
1369
1370static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
1371{
1372	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
1373
1374	if (sbi->cur_cp_pack == 1)
1375		start_addr += sbi->blocks_per_seg;
1376	return start_addr;
1377}
1378
1379static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
1380{
1381	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
1382}
1383
1384static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
1385{
1386	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
1387}
1388
1389static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
1390						struct inode *inode)
1391{
1392	block_t	valid_block_count;
1393	unsigned int valid_node_count;
1394
1395	spin_lock(&sbi->stat_lock);
1396
1397	valid_block_count = sbi->total_valid_block_count + 1;
1398	if (unlikely(valid_block_count > sbi->user_block_count)) {
1399		spin_unlock(&sbi->stat_lock);
1400		return false;
1401	}
1402
1403	valid_node_count = sbi->total_valid_node_count + 1;
1404	if (unlikely(valid_node_count > sbi->total_node_count)) {
1405		spin_unlock(&sbi->stat_lock);
1406		return false;
1407	}
1408
1409	if (inode)
1410		f2fs_i_blocks_write(inode, 1, true);
1411
1412	sbi->total_valid_node_count++;
1413	sbi->total_valid_block_count++;
1414	spin_unlock(&sbi->stat_lock);
1415
1416	percpu_counter_inc(&sbi->alloc_valid_block_count);
1417	return true;
1418}
1419
1420static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
1421						struct inode *inode)
1422{
1423	spin_lock(&sbi->stat_lock);
1424
1425	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
1426	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
1427	f2fs_bug_on(sbi, !inode->i_blocks);
1428
1429	f2fs_i_blocks_write(inode, 1, false);
1430	sbi->total_valid_node_count--;
1431	sbi->total_valid_block_count--;
1432
1433	spin_unlock(&sbi->stat_lock);
1434}
1435
1436static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
1437{
1438	return sbi->total_valid_node_count;
1439}
1440
1441static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
1442{
1443	percpu_counter_inc(&sbi->total_valid_inode_count);
1444}
1445
1446static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
1447{
1448	percpu_counter_dec(&sbi->total_valid_inode_count);
1449}
1450
1451static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
1452{
1453	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
1454}
1455
1456static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
1457						pgoff_t index, bool for_write)
1458{
1459#ifdef CONFIG_F2FS_FAULT_INJECTION
1460	struct page *page = find_lock_page(mapping, index);
1461	if (page)
1462		return page;
1463
1464	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
1465		return NULL;
1466#endif
1467	if (!for_write)
1468		return grab_cache_page(mapping, index);
1469	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
1470}
1471
1472static inline void f2fs_copy_page(struct page *src, struct page *dst)
1473{
1474	char *src_kaddr = kmap(src);
1475	char *dst_kaddr = kmap(dst);
1476
1477	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
1478	kunmap(dst);
1479	kunmap(src);
1480}
1481
1482static inline void f2fs_put_page(struct page *page, int unlock)
1483{
1484	if (!page)
1485		return;
1486
1487	if (unlock) {
1488		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
1489		unlock_page(page);
1490	}
1491	put_page(page);
1492}
1493
1494static inline void f2fs_put_dnode(struct dnode_of_data *dn)
1495{
1496	if (dn->node_page)
1497		f2fs_put_page(dn->node_page, 1);
1498	if (dn->inode_page && dn->node_page != dn->inode_page)
1499		f2fs_put_page(dn->inode_page, 0);
1500	dn->node_page = NULL;
1501	dn->inode_page = NULL;
1502}
1503
1504static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
1505					size_t size)
1506{
1507	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
1508}
1509
1510static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
1511						gfp_t flags)
1512{
1513	void *entry;
1514
1515	entry = kmem_cache_alloc(cachep, flags);
1516	if (!entry)
1517		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
1518	return entry;
1519}
1520
1521static inline struct bio *f2fs_bio_alloc(int npages)
1522{
1523	struct bio *bio;
1524
1525	/* No failure on bio allocation */
1526	bio = bio_alloc(GFP_NOIO, npages);
1527	if (!bio)
1528		bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
1529	return bio;
1530}
1531
1532static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
1533				unsigned long index, void *item)
1534{
1535	while (radix_tree_insert(root, index, item))
1536		cond_resched();
1537}
1538
1539#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
1540
1541static inline bool IS_INODE(struct page *page)
1542{
1543	struct f2fs_node *p = F2FS_NODE(page);
1544	return RAW_IS_INODE(p);
1545}
1546
1547static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
1548{
1549	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
1550}
1551
1552static inline block_t datablock_addr(struct page *node_page,
1553		unsigned int offset)
1554{
1555	struct f2fs_node *raw_node;
1556	__le32 *addr_array;
1557	raw_node = F2FS_NODE(node_page);
1558	addr_array = blkaddr_in_node(raw_node);
1559	return le32_to_cpu(addr_array[offset]);
1560}
1561
1562static inline int f2fs_test_bit(unsigned int nr, char *addr)
1563{
1564	int mask;
1565
1566	addr += (nr >> 3);
1567	mask = 1 << (7 - (nr & 0x07));
1568	return mask & *addr;
1569}
1570
1571static inline void f2fs_set_bit(unsigned int nr, char *addr)
1572{
1573	int mask;
1574
1575	addr += (nr >> 3);
1576	mask = 1 << (7 - (nr & 0x07));
1577	*addr |= mask;
1578}
1579
1580static inline void f2fs_clear_bit(unsigned int nr, char *addr)
1581{
1582	int mask;
1583
1584	addr += (nr >> 3);
1585	mask = 1 << (7 - (nr & 0x07));
1586	*addr &= ~mask;
1587}
1588
1589static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
1590{
1591	int mask;
1592	int ret;
1593
1594	addr += (nr >> 3);
1595	mask = 1 << (7 - (nr & 0x07));
1596	ret = mask & *addr;
1597	*addr |= mask;
1598	return ret;
1599}
1600
1601static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
1602{
1603	int mask;
1604	int ret;
1605
1606	addr += (nr >> 3);
1607	mask = 1 << (7 - (nr & 0x07));
1608	ret = mask & *addr;
1609	*addr &= ~mask;
1610	return ret;
1611}
1612
1613static inline void f2fs_change_bit(unsigned int nr, char *addr)
1614{
1615	int mask;
1616
1617	addr += (nr >> 3);
1618	mask = 1 << (7 - (nr & 0x07));
1619	*addr ^= mask;
1620}
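/*
 * Note on bit numbering (illustrative): these helpers count from the most
 * significant bit of each byte, so f2fs_set_bit(0, addr) sets 0x80 in
 * addr[0], f2fs_set_bit(7, addr) sets 0x01 in addr[0], and
 * f2fs_set_bit(8, addr) sets 0x80 in addr[1].  This matches the on-disk
 * bitmap layout rather than the kernel's native little-endian bitops.
 */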
1621
1622/* used for f2fs_inode_info->flags */
1623enum {
1624	FI_NEW_INODE,		/* indicate newly allocated inode */
1625	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
1626	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
1627	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
1628	FI_INC_LINK,		/* need to increment i_nlink */
1629	FI_ACL_MODE,		/* indicate acl mode */
1630	FI_NO_ALLOC,		/* should not allocate any blocks */
1631	FI_FREE_NID,		/* free allocated nid */
1632	FI_NO_EXTENT,		/* not to use the extent cache */
1633	FI_INLINE_XATTR,	/* used for inline xattr */
1634	FI_INLINE_DATA,		/* used for inline data*/
1635	FI_INLINE_DENTRY,	/* used for inline dentry */
1636	FI_APPEND_WRITE,	/* inode has appended data */
1637	FI_UPDATE_WRITE,	/* inode has in-place-update data */
1638	FI_NEED_IPU,		/* used for ipu per file */
1639	FI_ATOMIC_FILE,		/* indicate atomic file */
1640	FI_VOLATILE_FILE,	/* indicate volatile file */
1641	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
1642	FI_DROP_CACHE,		/* drop dirty page cache */
1643	FI_DATA_EXIST,		/* indicate data exists */
1644	FI_INLINE_DOTS,		/* indicate inline dot dentries */
1645	FI_DO_DEFRAG,		/* indicate defragment is running */
1646	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
1647};
1648
1649static inline void __mark_inode_dirty_flag(struct inode *inode,
1650						int flag, bool set)
1651{
1652	switch (flag) {
1653	case FI_INLINE_XATTR:
1654	case FI_INLINE_DATA:
1655	case FI_INLINE_DENTRY:
1656		if (set)
1657			return;
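		/* fall through */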
1658	case FI_DATA_EXIST:
1659	case FI_INLINE_DOTS:
1660		f2fs_mark_inode_dirty_sync(inode, true);
1661	}
1662}
1663
1664static inline void set_inode_flag(struct inode *inode, int flag)
1665{
1666	if (!test_bit(flag, &F2FS_I(inode)->flags))
1667		set_bit(flag, &F2FS_I(inode)->flags);
1668	__mark_inode_dirty_flag(inode, flag, true);
1669}
1670
1671static inline int is_inode_flag_set(struct inode *inode, int flag)
1672{
1673	return test_bit(flag, &F2FS_I(inode)->flags);
1674}
1675
1676static inline void clear_inode_flag(struct inode *inode, int flag)
1677{
1678	if (test_bit(flag, &F2FS_I(inode)->flags))
1679		clear_bit(flag, &F2FS_I(inode)->flags);
1680	__mark_inode_dirty_flag(inode, flag, false);
1681}
1682
1683static inline void set_acl_inode(struct inode *inode, umode_t mode)
1684{
1685	F2FS_I(inode)->i_acl_mode = mode;
1686	set_inode_flag(inode, FI_ACL_MODE);
1687	f2fs_mark_inode_dirty_sync(inode, false);
1688}
1689
1690static inline void f2fs_i_links_write(struct inode *inode, bool inc)
1691{
1692	if (inc)
1693		inc_nlink(inode);
1694	else
1695		drop_nlink(inode);
1696	f2fs_mark_inode_dirty_sync(inode, true);
1697}
1698
1699static inline void f2fs_i_blocks_write(struct inode *inode,
1700					blkcnt_t diff, bool add)
1701{
1702	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
1703	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
1704
1705	inode->i_blocks = add ? inode->i_blocks + diff :
1706				inode->i_blocks - diff;
1707	f2fs_mark_inode_dirty_sync(inode, true);
1708	if (clean || recover)
1709		set_inode_flag(inode, FI_AUTO_RECOVER);
1710}
1711
1712static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
1713{
1714	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
1715	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
1716
1717	if (i_size_read(inode) == i_size)
1718		return;
1719
1720	i_size_write(inode, i_size);
1721	f2fs_mark_inode_dirty_sync(inode, true);
1722	if (clean || recover)
1723		set_inode_flag(inode, FI_AUTO_RECOVER);
1724}
1725
1726static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
1727{
1728	F2FS_I(inode)->i_current_depth = depth;
1729	f2fs_mark_inode_dirty_sync(inode, true);
1730}
1731
1732static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
1733{
1734	F2FS_I(inode)->i_xattr_nid = xnid;
1735	f2fs_mark_inode_dirty_sync(inode, true);
1736}
1737
1738static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
1739{
1740	F2FS_I(inode)->i_pino = pino;
1741	f2fs_mark_inode_dirty_sync(inode, true);
1742}
1743
1744static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
1745{
1746	struct f2fs_inode_info *fi = F2FS_I(inode);
1747
1748	if (ri->i_inline & F2FS_INLINE_XATTR)
1749		set_bit(FI_INLINE_XATTR, &fi->flags);
1750	if (ri->i_inline & F2FS_INLINE_DATA)
1751		set_bit(FI_INLINE_DATA, &fi->flags);
1752	if (ri->i_inline & F2FS_INLINE_DENTRY)
1753		set_bit(FI_INLINE_DENTRY, &fi->flags);
1754	if (ri->i_inline & F2FS_DATA_EXIST)
1755		set_bit(FI_DATA_EXIST, &fi->flags);
1756	if (ri->i_inline & F2FS_INLINE_DOTS)
1757		set_bit(FI_INLINE_DOTS, &fi->flags);
1758}
1759
1760static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
1761{
1762	ri->i_inline = 0;
1763
1764	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
1765		ri->i_inline |= F2FS_INLINE_XATTR;
1766	if (is_inode_flag_set(inode, FI_INLINE_DATA))
1767		ri->i_inline |= F2FS_INLINE_DATA;
1768	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
1769		ri->i_inline |= F2FS_INLINE_DENTRY;
1770	if (is_inode_flag_set(inode, FI_DATA_EXIST))
1771		ri->i_inline |= F2FS_DATA_EXIST;
1772	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
1773		ri->i_inline |= F2FS_INLINE_DOTS;
1774}
1775
1776static inline int f2fs_has_inline_xattr(struct inode *inode)
1777{
1778	return is_inode_flag_set(inode, FI_INLINE_XATTR);
1779}
1780
1781static inline unsigned int addrs_per_inode(struct inode *inode)
1782{
1783	if (f2fs_has_inline_xattr(inode))
1784		return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
1785	return DEF_ADDRS_PER_INODE;
1786}
1787
1788static inline void *inline_xattr_addr(struct page *page)
1789{
1790	struct f2fs_inode *ri = F2FS_INODE(page);
1791	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
1792					F2FS_INLINE_XATTR_ADDRS]);
1793}
1794
1795static inline int inline_xattr_size(struct inode *inode)
1796{
1797	if (f2fs_has_inline_xattr(inode))
1798		return F2FS_INLINE_XATTR_ADDRS << 2;
1799	else
1800		return 0;
1801}
1802
1803static inline int f2fs_has_inline_data(struct inode *inode)
1804{
1805	return is_inode_flag_set(inode, FI_INLINE_DATA);
1806}
1807
1808static inline void f2fs_clear_inline_inode(struct inode *inode)
1809{
1810	clear_inode_flag(inode, FI_INLINE_DATA);
1811	clear_inode_flag(inode, FI_DATA_EXIST);
1812}
1813
1814static inline int f2fs_exist_data(struct inode *inode)
1815{
1816	return is_inode_flag_set(inode, FI_DATA_EXIST);
1817}
1818
1819static inline int f2fs_has_inline_dots(struct inode *inode)
1820{
1821	return is_inode_flag_set(inode, FI_INLINE_DOTS);
1822}
1823
1824static inline bool f2fs_is_atomic_file(struct inode *inode)
1825{
1826	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
1827}
1828
1829static inline bool f2fs_is_volatile_file(struct inode *inode)
1830{
1831	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
1832}
1833
1834static inline bool f2fs_is_first_block_written(struct inode *inode)
1835{
1836	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
1837}
1838
1839static inline bool f2fs_is_drop_cache(struct inode *inode)
1840{
1841	return is_inode_flag_set(inode, FI_DROP_CACHE);
1842}
1843
1844static inline void *inline_data_addr(struct page *page)
1845{
1846	struct f2fs_inode *ri = F2FS_INODE(page);
1847	return (void *)&(ri->i_addr[1]);
1848}
1849
1850static inline int f2fs_has_inline_dentry(struct inode *inode)
1851{
1852	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
1853}
1854
1855static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
1856{
1857	if (!f2fs_has_inline_dentry(dir))
1858		kunmap(page);
1859}
1860
1861static inline int is_file(struct inode *inode, int type)
1862{
1863	return F2FS_I(inode)->i_advise & type;
1864}
1865
1866static inline void set_file(struct inode *inode, int type)
1867{
1868	F2FS_I(inode)->i_advise |= type;
1869	f2fs_mark_inode_dirty_sync(inode, true);
1870}
1871
1872static inline void clear_file(struct inode *inode, int type)
1873{
1874	F2FS_I(inode)->i_advise &= ~type;
1875	f2fs_mark_inode_dirty_sync(inode, true);
1876}
1877
1878static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
1879{
1880	if (dsync) {
1881		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1882		bool ret;
1883
1884		spin_lock(&sbi->inode_lock[DIRTY_META]);
1885		ret = list_empty(&F2FS_I(inode)->gdirty_list);
1886		spin_unlock(&sbi->inode_lock[DIRTY_META]);
1887		return ret;
1888	}
1889	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
1890			file_keep_isize(inode) ||
1891			i_size_read(inode) & PAGE_MASK)
1892		return false;
1893	return F2FS_I(inode)->last_disk_size == i_size_read(inode);
1894}
1895
1896static inline int f2fs_readonly(struct super_block *sb)
1897{
1898	return sb->s_flags & MS_RDONLY;
1899}
1900
1901static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
1902{
1903	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
1904}
1905
1906static inline bool is_dot_dotdot(const struct qstr *str)
1907{
1908	if (str->len == 1 && str->name[0] == '.')
1909		return true;
1910
1911	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
1912		return true;
1913
1914	return false;
1915}
1916
1917static inline bool f2fs_may_extent_tree(struct inode *inode)
1918{
1919	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
1920			is_inode_flag_set(inode, FI_NO_EXTENT))
1921		return false;
1922
1923	return S_ISREG(inode->i_mode);
1924}
1925
1926static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
1927					size_t size, gfp_t flags)
1928{
1929#ifdef CONFIG_F2FS_FAULT_INJECTION
1930	if (time_to_inject(sbi, FAULT_KMALLOC))
1931		return NULL;
1932#endif
1933	return kmalloc(size, flags);
1934}
1935
1936static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
1937{
1938	void *ret;
1939
1940	ret = kmalloc(size, flags | __GFP_NOWARN);
1941	if (!ret)
1942		ret = __vmalloc(size, flags, PAGE_KERNEL);
1943	return ret;
1944}
1945
1946static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
1947{
1948	void *ret;
1949
1950	ret = kzalloc(size, flags | __GFP_NOWARN);
1951	if (!ret)
1952		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
1953	return ret;
1954}
1955
1956#define get_inode_mode(i) \
1957	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
1958	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
1959
1960/* get offset of first page in next direct node */
1961#define PGOFS_OF_NEXT_DNODE(pgofs, inode)				\
1962	((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) :	\
1963	(pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) /	\
1964	ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
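/*
 * Worked example for the macro above (the exact constants depend on whether
 * the inode reserves inline xattr space; 923 and 1018 are illustrative only):
 * with ADDRS_PER_INODE(inode) == 923 and ADDRS_PER_BLOCK == 1018, page
 * offset 1000 lies in the first direct node (offsets 923..1940), so
 * PGOFS_OF_NEXT_DNODE(1000, inode) evaluates to
 * (1000 - 923 + 1018) / 1018 * 1018 + 923 == 1941, i.e. the first offset
 * served by the next direct node.
 */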
1965
1966/*
1967 * file.c
1968 */
1969int f2fs_sync_file(struct file *, loff_t, loff_t, int);
1970void truncate_data_blocks(struct dnode_of_data *);
1971int truncate_blocks(struct inode *, u64, bool);
1972int f2fs_truncate(struct inode *);
1973int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
1974int f2fs_setattr(struct dentry *, struct iattr *);
1975int truncate_hole(struct inode *, pgoff_t, pgoff_t);
1976int truncate_data_blocks_range(struct dnode_of_data *, int);
1977long f2fs_ioctl(struct file *, unsigned int, unsigned long);
1978long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
1979
1980/*
1981 * inode.c
1982 */
1983void f2fs_set_inode_flags(struct inode *);
1984struct inode *f2fs_iget(struct super_block *, unsigned long);
1985struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
1986int try_to_free_nats(struct f2fs_sb_info *, int);
1987int update_inode(struct inode *, struct page *);
1988int update_inode_page(struct inode *);
1989int f2fs_write_inode(struct inode *, struct writeback_control *);
1990void f2fs_evict_inode(struct inode *);
1991void handle_failed_inode(struct inode *);
1992
1993/*
1994 * namei.c
1995 */
1996struct dentry *f2fs_get_parent(struct dentry *child);
1997
1998/*
1999 * dir.c
2000 */
2001void set_de_type(struct f2fs_dir_entry *, umode_t);
2002unsigned char get_de_type(struct f2fs_dir_entry *);
2003struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
2004			f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
2005int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
2006			unsigned int, struct fscrypt_str *);
2007void do_make_empty_dir(struct inode *, struct inode *,
2008			struct f2fs_dentry_ptr *);
2009struct page *init_inode_metadata(struct inode *, struct inode *,
2010		const struct qstr *, const struct qstr *, struct page *);
2011void update_parent_metadata(struct inode *, struct inode *, unsigned int);
2012int room_for_filename(const void *, int, int);
2013void f2fs_drop_nlink(struct inode *, struct inode *);
2014struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
2015							struct page **);
2016struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
2017							struct page **);
2018struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
2019ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **);
2020void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
2021				struct page *, struct inode *);
2022int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
2023void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
2024			const struct qstr *, f2fs_hash_t, unsigned int);
2025int f2fs_add_regular_entry(struct inode *, const struct qstr *,
2026			const struct qstr *, struct inode *, nid_t, umode_t);
2027 int __f2fs_do_add_link(struct inode *, struct fscrypt_name *, struct inode *,
2028			nid_t, umode_t);
2029int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
2030			umode_t);
2031void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
2032							struct inode *);
2033int f2fs_do_tmpfile(struct inode *, struct inode *);
2034bool f2fs_empty_dir(struct inode *);
2035
2036static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
2037{
2038	return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name,
2039				inode, inode->i_ino, inode->i_mode);
2040}
2041
2042/*
2043 * super.c
2044 */
2045int f2fs_inode_dirtied(struct inode *, bool);
2046void f2fs_inode_synced(struct inode *);
2047int f2fs_commit_super(struct f2fs_sb_info *, bool);
2048int f2fs_sync_fs(struct super_block *, int);
2049extern __printf(3, 4)
2050void f2fs_msg(struct super_block *, const char *, const char *, ...);
2051int sanity_check_ckpt(struct f2fs_sb_info *sbi);
2052
2053/*
2054 * hash.c
2055 */
2056f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
2057
2058/*
2059 * node.c
2060 */
2061struct dnode_of_data;
2062struct node_info;
2063
2064bool available_free_memory(struct f2fs_sb_info *, int);
2065int need_dentry_mark(struct f2fs_sb_info *, nid_t);
2066bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
2067bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
2068void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
2069pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
2070int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
2071int truncate_inode_blocks(struct inode *, pgoff_t);
2072int truncate_xattr_node(struct inode *, struct page *);
2073int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
2074int remove_inode_page(struct inode *);
2075struct page *new_inode_page(struct inode *);
2076struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
2077void ra_node_page(struct f2fs_sb_info *, nid_t);
2078struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
2079struct page *get_node_page_ra(struct page *, int);
2080void move_node_page(struct page *, int);
2081int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
2082			struct writeback_control *, bool);
2083int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
2084void build_free_nids(struct f2fs_sb_info *, bool);
2085bool alloc_nid(struct f2fs_sb_info *, nid_t *);
2086void alloc_nid_done(struct f2fs_sb_info *, nid_t);
2087void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
2088int try_to_free_nids(struct f2fs_sb_info *, int);
2089void recover_inline_xattr(struct inode *, struct page *);
2090void recover_xattr_data(struct inode *, struct page *, block_t);
2091int recover_inode_page(struct f2fs_sb_info *, struct page *);
2092int restore_node_summary(struct f2fs_sb_info *, unsigned int,
2093				struct f2fs_summary_block *);
2094void flush_nat_entries(struct f2fs_sb_info *);
2095int build_node_manager(struct f2fs_sb_info *);
2096void destroy_node_manager(struct f2fs_sb_info *);
2097int __init create_node_manager_caches(void);
2098void destroy_node_manager_caches(void);
2099
2100/*
2101 * segment.c
2102 */
2103void register_inmem_page(struct inode *, struct page *);
2104void drop_inmem_pages(struct inode *);
2105int commit_inmem_pages(struct inode *);
2106void f2fs_balance_fs(struct f2fs_sb_info *, bool);
2107void f2fs_balance_fs_bg(struct f2fs_sb_info *);
2108int f2fs_issue_flush(struct f2fs_sb_info *);
2109int create_flush_cmd_control(struct f2fs_sb_info *);
2110void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
2111void invalidate_blocks(struct f2fs_sb_info *, block_t);
2112bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
2113void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
2114void f2fs_wait_all_discard_bio(struct f2fs_sb_info *);
2115void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
2116void release_discard_addrs(struct f2fs_sb_info *);
2117int npages_for_summary_flush(struct f2fs_sb_info *, bool);
2118void allocate_new_segments(struct f2fs_sb_info *);
2119int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
2120struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
2121void update_meta_page(struct f2fs_sb_info *, void *, block_t);
2122void write_meta_page(struct f2fs_sb_info *, struct page *);
2123void write_node_page(unsigned int, struct f2fs_io_info *);
2124void write_data_page(struct dnode_of_data *, struct f2fs_io_info *);
2125void rewrite_data_page(struct f2fs_io_info *);
2126void __f2fs_replace_block(struct f2fs_sb_info *, struct f2fs_summary *,
2127					block_t, block_t, bool, bool);
2128void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
2129				block_t, block_t, unsigned char, bool, bool);
2130void allocate_data_block(struct f2fs_sb_info *, struct page *,
2131		block_t, block_t *, struct f2fs_summary *, int);
2132void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
2133void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
2134void write_data_summaries(struct f2fs_sb_info *, block_t);
2135void write_node_summaries(struct f2fs_sb_info *, block_t);
2136int lookup_journal_in_cursum(struct f2fs_journal *, int, unsigned int, int);
2137void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *);
2138int build_segment_manager(struct f2fs_sb_info *);
2139void destroy_segment_manager(struct f2fs_sb_info *);
2140int __init create_segment_manager_caches(void);
2141void destroy_segment_manager_caches(void);
2142
2143/*
2144 * checkpoint.c
2145 */
2146void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
2147struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
2148struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
2149struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
2150bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
2151int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
2152void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
2153long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
2154void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
2155void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
2156void release_ino_entry(struct f2fs_sb_info *, bool);
2157bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
2158int f2fs_sync_inode_meta(struct f2fs_sb_info *);
2159int acquire_orphan_inode(struct f2fs_sb_info *);
2160void release_orphan_inode(struct f2fs_sb_info *);
2161void add_orphan_inode(struct inode *);
2162void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
2163int recover_orphan_inodes(struct f2fs_sb_info *);
2164int get_valid_checkpoint(struct f2fs_sb_info *);
2165void update_dirty_page(struct inode *, struct page *);
2166void remove_dirty_inode(struct inode *);
2167int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type);
2168int write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
2169void init_ino_entry_info(struct f2fs_sb_info *);
2170int __init create_checkpoint_caches(void);
2171void destroy_checkpoint_caches(void);
2172
2173/*
2174 * data.c
2175 */
2176void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
2177void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
2178				struct page *, nid_t, enum page_type, int);
2179void f2fs_flush_merged_bios(struct f2fs_sb_info *);
2180int f2fs_submit_page_bio(struct f2fs_io_info *);
2181void f2fs_submit_page_mbio(struct f2fs_io_info *);
2182struct block_device *f2fs_target_device(struct f2fs_sb_info *,
2183				block_t, struct bio *);
2184int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
2185void set_data_blkaddr(struct dnode_of_data *);
2186void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
2187int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
2188int reserve_new_block(struct dnode_of_data *);
2189int f2fs_get_block(struct dnode_of_data *, pgoff_t);
2190int f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
2191int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
2192struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
2193struct page *find_data_page(struct inode *, pgoff_t);
2194struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
2195struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
2196int do_write_data_page(struct f2fs_io_info *);
2197int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int);
2198int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
2199void f2fs_set_page_dirty_nobuffers(struct page *);
2200void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
2201int f2fs_release_page(struct page *, gfp_t);
2202#ifdef CONFIG_MIGRATION
2203int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
2204				enum migrate_mode);
2205#endif
2206
2207/*
2208 * gc.c
2209 */
2210int start_gc_thread(struct f2fs_sb_info *);
2211void stop_gc_thread(struct f2fs_sb_info *);
2212block_t start_bidx_of_node(unsigned int, struct inode *);
2213int f2fs_gc(struct f2fs_sb_info *, bool, bool);
2214void build_gc_manager(struct f2fs_sb_info *);
2215
2216/*
2217 * recovery.c
2218 */
2219int recover_fsync_data(struct f2fs_sb_info *, bool);
2220bool space_for_roll_forward(struct f2fs_sb_info *);
2221
2222/*
2223 * debug.c
2224 */
2225#ifdef CONFIG_F2FS_STAT_FS
2226struct f2fs_stat_info {
2227	struct list_head stat_list;
2228	struct f2fs_sb_info *sbi;
2229	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
2230	int main_area_segs, main_area_sections, main_area_zones;
2231	unsigned long long hit_largest, hit_cached, hit_rbtree;
2232	unsigned long long hit_total, total_ext;
2233	int ext_tree, zombie_tree, ext_node;
2234	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
2235	int inmem_pages;
2236	unsigned int ndirty_dirs, ndirty_files, ndirty_all;
2237	int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
2238	int total_count, utilization;
2239	int bg_gc, nr_wb_cp_data, nr_wb_data;
2240	int inline_xattr, inline_inode, inline_dir, orphans;
2241	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
2242	unsigned int bimodal, avg_vblocks;
2243	int util_free, util_valid, util_invalid;
2244	int rsvd_segs, overp_segs;
2245	int dirty_count, node_pages, meta_pages;
2246	int prefree_count, call_count, cp_count, bg_cp_count;
2247	int tot_segs, node_segs, data_segs, free_segs, free_secs;
2248	int bg_node_segs, bg_data_segs;
2249	int tot_blks, data_blks, node_blks;
2250	int bg_data_blks, bg_node_blks;
2251	int curseg[NR_CURSEG_TYPE];
2252	int cursec[NR_CURSEG_TYPE];
2253	int curzone[NR_CURSEG_TYPE];
2254
2255	unsigned int segment_count[2];
2256	unsigned int block_count[2];
2257	unsigned int inplace_count;
2258	unsigned long long base_mem, cache_mem, page_mem;
2259};
2260
2261static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
2262{
2263	return (struct f2fs_stat_info *)sbi->stat_info;
2264}
2265
2266#define stat_inc_cp_count(si)		((si)->cp_count++)
2267#define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
2268#define stat_inc_call_count(si)		((si)->call_count++)
2269#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
2270#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
2271#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
2272#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
2273#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
2274#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
2275#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
2276#define stat_inc_inline_xattr(inode)					\
2277	do {								\
2278		if (f2fs_has_inline_xattr(inode))			\
2279			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
2280	} while (0)
2281#define stat_dec_inline_xattr(inode)					\
2282	do {								\
2283		if (f2fs_has_inline_xattr(inode))			\
2284			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
2285	} while (0)
2286#define stat_inc_inline_inode(inode)					\
2287	do {								\
2288		if (f2fs_has_inline_data(inode))			\
2289			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
2290	} while (0)
2291#define stat_dec_inline_inode(inode)					\
2292	do {								\
2293		if (f2fs_has_inline_data(inode))			\
2294			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
2295	} while (0)
2296#define stat_inc_inline_dir(inode)					\
2297	do {								\
2298		if (f2fs_has_inline_dentry(inode))			\
2299			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
2300	} while (0)
2301#define stat_dec_inline_dir(inode)					\
2302	do {								\
2303		if (f2fs_has_inline_dentry(inode))			\
2304			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
2305	} while (0)
2306#define stat_inc_seg_type(sbi, curseg)					\
2307		((sbi)->segment_count[(curseg)->alloc_type]++)
2308#define stat_inc_block_count(sbi, curseg)				\
2309		((sbi)->block_count[(curseg)->alloc_type]++)
2310#define stat_inc_inplace_blocks(sbi)					\
2311		(atomic_inc(&(sbi)->inplace_count))
2312#define stat_inc_seg_count(sbi, type, gc_type)				\
2313	do {								\
2314		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
2315		(si)->tot_segs++;					\
2316		if (type == SUM_TYPE_DATA) {				\
2317			si->data_segs++;				\
2318			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
2319		} else {						\
2320			si->node_segs++;				\
2321			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
2322		}							\
2323	} while (0)
2324
2325#define stat_inc_tot_blk_count(si, blks)				\
2326	(si->tot_blks += (blks))
2327
2328#define stat_inc_data_blk_count(sbi, blks, gc_type)			\
2329	do {								\
2330		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
2331		stat_inc_tot_blk_count(si, blks);			\
2332		si->data_blks += (blks);				\
2333		si->bg_data_blks += (gc_type == BG_GC) ? (blks) : 0;	\
2334	} while (0)
2335
2336#define stat_inc_node_blk_count(sbi, blks, gc_type)			\
2337	do {								\
2338		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
2339		stat_inc_tot_blk_count(si, blks);			\
2340		si->node_blks += (blks);				\
2341		si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0;	\
2342	} while (0)
2343
2344int f2fs_build_stats(struct f2fs_sb_info *);
2345void f2fs_destroy_stats(struct f2fs_sb_info *);
2346int __init f2fs_create_root_stats(void);
2347void f2fs_destroy_root_stats(void);
2348#else
2349#define stat_inc_cp_count(si)
2350#define stat_inc_bg_cp_count(si)
2351#define stat_inc_call_count(si)
2352#define stat_inc_bggc_count(si)
2353#define stat_inc_dirty_inode(sbi, type)
2354#define stat_dec_dirty_inode(sbi, type)
2355#define stat_inc_total_hit(sb)
2356#define stat_inc_rbtree_node_hit(sb)
2357#define stat_inc_largest_node_hit(sbi)
2358#define stat_inc_cached_node_hit(sbi)
2359#define stat_inc_inline_xattr(inode)
2360#define stat_dec_inline_xattr(inode)
2361#define stat_inc_inline_inode(inode)
2362#define stat_dec_inline_inode(inode)
2363#define stat_inc_inline_dir(inode)
2364#define stat_dec_inline_dir(inode)
2365#define stat_inc_seg_type(sbi, curseg)
2366#define stat_inc_block_count(sbi, curseg)
2367#define stat_inc_inplace_blocks(sbi)
2368#define stat_inc_seg_count(sbi, type, gc_type)
2369#define stat_inc_tot_blk_count(si, blks)
2370#define stat_inc_data_blk_count(sbi, blks, gc_type)
2371#define stat_inc_node_blk_count(sbi, blks, gc_type)
2372
2373static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
2374static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
2375static inline int __init f2fs_create_root_stats(void) { return 0; }
2376static inline void f2fs_destroy_root_stats(void) { }
2377#endif
2378
2379extern const struct file_operations f2fs_dir_operations;
2380extern const struct file_operations f2fs_file_operations;
2381extern const struct inode_operations f2fs_file_inode_operations;
2382extern const struct address_space_operations f2fs_dblock_aops;
2383extern const struct address_space_operations f2fs_node_aops;
2384extern const struct address_space_operations f2fs_meta_aops;
2385extern const struct inode_operations f2fs_dir_inode_operations;
2386extern const struct inode_operations f2fs_symlink_inode_operations;
2387extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
2388extern const struct inode_operations f2fs_special_inode_operations;
2389extern struct kmem_cache *inode_entry_slab;
2390
2391/*
2392 * inline.c
2393 */
2394bool f2fs_may_inline_data(struct inode *);
2395bool f2fs_may_inline_dentry(struct inode *);
2396void read_inline_data(struct page *, struct page *);
2397bool truncate_inline_inode(struct page *, u64);
2398int f2fs_read_inline_data(struct inode *, struct page *);
2399int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
2400int f2fs_convert_inline_inode(struct inode *);
2401int f2fs_write_inline_data(struct inode *, struct page *);
2402bool recover_inline_data(struct inode *, struct page *);
2403struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
2404				struct fscrypt_name *, struct page **);
2405int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
2406int f2fs_add_inline_entry(struct inode *, const struct qstr *,
2407		const struct qstr *, struct inode *, nid_t, umode_t);
2408void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
2409						struct inode *, struct inode *);
2410bool f2fs_empty_inline_dir(struct inode *);
2411int f2fs_read_inline_dir(struct file *, struct dir_context *,
2412						struct fscrypt_str *);
2413int f2fs_inline_data_fiemap(struct inode *,
2414		struct fiemap_extent_info *, __u64, __u64);
2415
2416/*
2417 * shrinker.c
2418 */
2419unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *);
2420unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
2421void f2fs_join_shrinker(struct f2fs_sb_info *);
2422void f2fs_leave_shrinker(struct f2fs_sb_info *);
2423
2424/*
2425 * extent_cache.c
2426 */
2427unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
2428bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
2429void f2fs_drop_extent_tree(struct inode *);
2430unsigned int f2fs_destroy_extent_node(struct inode *);
2431void f2fs_destroy_extent_tree(struct inode *);
2432bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
2433void f2fs_update_extent_cache(struct dnode_of_data *);
2434void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
2435						pgoff_t, block_t, unsigned int);
2436void init_extent_cache_info(struct f2fs_sb_info *);
2437int __init create_extent_cache(void);
2438void destroy_extent_cache(void);
2439
2440/*
2441 * crypto support
2442 */
2443static inline bool f2fs_encrypted_inode(struct inode *inode)
2444{
2445	return file_is_encrypt(inode);
2446}
2447
2448static inline void f2fs_set_encrypted_inode(struct inode *inode)
2449{
2450#ifdef CONFIG_F2FS_FS_ENCRYPTION
2451	file_set_encrypt(inode);
2452#endif
2453}
2454
2455static inline bool f2fs_bio_encrypted(struct bio *bio)
2456{
2457	return bio->bi_private != NULL;
2458}
2459
2460static inline int f2fs_sb_has_crypto(struct super_block *sb)
2461{
2462	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
2463}
2464
2465static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
2466{
2467	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
2468}
2469
2470#ifdef CONFIG_BLK_DEV_ZONED
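/*
 * Look up which mapped device owns @bdev and return the recorded type of
 * the zone containing @blkaddr (conventional vs. sequential), or -EINVAL
 * when @bdev does not belong to this filesystem.
 */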
2471static inline int get_blkz_type(struct f2fs_sb_info *sbi,
2472			struct block_device *bdev, block_t blkaddr)
2473{
2474	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
2475	int i;
2476
2477	for (i = 0; i < sbi->s_ndevs; i++)
2478		if (FDEV(i).bdev == bdev)
2479			return FDEV(i).blkz_type[zno];
2480	return -EINVAL;
2481}
2482#endif
2483
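/*
 * Discard is considered available either because the underlying queue
 * advertises discard support or because the filesystem sits on a zoned
 * block device, where zone resets are issued through the discard path.
 */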
2484static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
2485{
2486	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
2487
2488	return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
2489}
2490
2491static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
2492{
2493	clear_opt(sbi, ADAPTIVE);
2494	clear_opt(sbi, LFS);
2495
2496	switch (mt) {
2497	case F2FS_MOUNT_ADAPTIVE:
2498		set_opt(sbi, ADAPTIVE);
2499		break;
2500	case F2FS_MOUNT_LFS:
2501		set_opt(sbi, LFS);
2502		break;
2503	}
2504}
2505
2506static inline bool f2fs_may_encrypt(struct inode *inode)
2507{
2508#ifdef CONFIG_F2FS_FS_ENCRYPTION
2509	umode_t mode = inode->i_mode;
2510
2511	return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
2512#else
2513	return false;
2514#endif
2515}
2516
2517#ifndef CONFIG_F2FS_FS_ENCRYPTION
2518#define fscrypt_set_d_op(i)
2519#define fscrypt_get_ctx			fscrypt_notsupp_get_ctx
2520#define fscrypt_release_ctx		fscrypt_notsupp_release_ctx
2521#define fscrypt_encrypt_page		fscrypt_notsupp_encrypt_page
2522#define fscrypt_decrypt_page		fscrypt_notsupp_decrypt_page
2523#define fscrypt_decrypt_bio_pages	fscrypt_notsupp_decrypt_bio_pages
2524#define fscrypt_pullback_bio_page	fscrypt_notsupp_pullback_bio_page
2525#define fscrypt_restore_control_page	fscrypt_notsupp_restore_control_page
2526#define fscrypt_zeroout_range		fscrypt_notsupp_zeroout_range
2527#define fscrypt_ioctl_set_policy	fscrypt_notsupp_ioctl_set_policy
2528#define fscrypt_ioctl_get_policy	fscrypt_notsupp_ioctl_get_policy
2529#define fscrypt_has_permitted_context	fscrypt_notsupp_has_permitted_context
2530#define fscrypt_inherit_context		fscrypt_notsupp_inherit_context
2531#define fscrypt_get_encryption_info	fscrypt_notsupp_get_encryption_info
2532#define fscrypt_put_encryption_info	fscrypt_notsupp_put_encryption_info
2533#define fscrypt_setup_filename		fscrypt_notsupp_setup_filename
2534#define fscrypt_free_filename		fscrypt_notsupp_free_filename
2535#define fscrypt_fname_encrypted_size	fscrypt_notsupp_fname_encrypted_size
2536#define fscrypt_fname_alloc_buffer	fscrypt_notsupp_fname_alloc_buffer
2537#define fscrypt_fname_free_buffer	fscrypt_notsupp_fname_free_buffer
2538#define fscrypt_fname_disk_to_usr	fscrypt_notsupp_fname_disk_to_usr
2539#define fscrypt_fname_usr_to_disk	fscrypt_notsupp_fname_usr_to_disk
2540#endif
2541#endif