fs/f2fs/file.c (Linux v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * fs/f2fs/file.c
   4 *
   5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   6 *             http://www.samsung.com/
   7 */
   8#include <linux/fs.h>
   9#include <linux/f2fs_fs.h>
  10#include <linux/stat.h>
  11#include <linux/buffer_head.h>
  12#include <linux/writeback.h>
  13#include <linux/blkdev.h>
  14#include <linux/falloc.h>
  15#include <linux/types.h>
  16#include <linux/compat.h>
  17#include <linux/uaccess.h>
  18#include <linux/mount.h>
  19#include <linux/pagevec.h>
  20#include <linux/uio.h>
  21#include <linux/uuid.h>
  22#include <linux/file.h>
  23#include <linux/nls.h>
  24#include <linux/sched/signal.h>
  25#include <linux/fileattr.h>
  26
  27#include "f2fs.h"
  28#include "node.h"
  29#include "segment.h"
  30#include "xattr.h"
  31#include "acl.h"
  32#include "gc.h"
  33#include <trace/events/f2fs.h>
  34#include <uapi/linux/f2fs.h>
  35
  36static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
  37{
  38	struct inode *inode = file_inode(vmf->vma->vm_file);
  39	vm_fault_t ret;
  40
  41	down_read(&F2FS_I(inode)->i_mmap_sem);
  42	ret = filemap_fault(vmf);
  43	up_read(&F2FS_I(inode)->i_mmap_sem);
  44
  45	if (!ret)
  46		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
  47							F2FS_BLKSIZE);
  48
  49	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
  50
  51	return ret;
  52}
  53
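/*
 * Write-fault handler for shared mappings: it refuses immutable and
 * released-compressed inodes, converts inline data, then allocates the
 * backing block (if needed) under i_mmap_sem, zeroes the part of the page
 * beyond EOF, and marks the page dirty for writeback.
 */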
  54static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
  55{
  56	struct page *page = vmf->page;
  57	struct inode *inode = file_inode(vmf->vma->vm_file);
  58	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  59	struct dnode_of_data dn;
  60	bool need_alloc = true;
  61	int err = 0;
  62
  63	if (unlikely(IS_IMMUTABLE(inode)))
  64		return VM_FAULT_SIGBUS;
  65
  66	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
  67		return VM_FAULT_SIGBUS;
  68
  69	if (unlikely(f2fs_cp_error(sbi))) {
  70		err = -EIO;
  71		goto err;
  72	}
  73
  74	if (!f2fs_is_checkpoint_ready(sbi)) {
  75		err = -ENOSPC;
  76		goto err;
  77	}
  78
  79	err = f2fs_convert_inline_inode(inode);
  80	if (err)
  81		goto err;
  82
  83#ifdef CONFIG_F2FS_FS_COMPRESSION
  84	if (f2fs_compressed_file(inode)) {
  85		int ret = f2fs_is_compressed_cluster(inode, page->index);
  86
  87		if (ret < 0) {
  88			err = ret;
  89			goto err;
  90		} else if (ret) {
  91			need_alloc = false;
  92		}
  93	}
  94#endif
   95	/* this must be done without holding any page lock */
  96	if (need_alloc)
  97		f2fs_balance_fs(sbi, true);
  98
  99	sb_start_pagefault(inode->i_sb);
 100
 101	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 102
 103	file_update_time(vmf->vma->vm_file);
 104	down_read(&F2FS_I(inode)->i_mmap_sem);
 105	lock_page(page);
 106	if (unlikely(page->mapping != inode->i_mapping ||
 107			page_offset(page) > i_size_read(inode) ||
 108			!PageUptodate(page))) {
 109		unlock_page(page);
 110		err = -EFAULT;
 111		goto out_sem;
 112	}
 113
 114	if (need_alloc) {
 115		/* block allocation */
 116		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
 117		set_new_dnode(&dn, inode, NULL, NULL, 0);
 118		err = f2fs_get_block(&dn, page->index);
 119		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
 120	}
 121
 122#ifdef CONFIG_F2FS_FS_COMPRESSION
 123	if (!need_alloc) {
 124		set_new_dnode(&dn, inode, NULL, NULL, 0);
 125		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 126		f2fs_put_dnode(&dn);
 127	}
 128#endif
 129	if (err) {
 130		unlock_page(page);
 131		goto out_sem;
 132	}
 133
 134	f2fs_wait_on_page_writeback(page, DATA, false, true);
 135
 136	/* wait for GCed page writeback via META_MAPPING */
 137	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 138
 139	/*
 140	 * check to see if the page is mapped already (no holes)
 141	 */
 142	if (PageMappedToDisk(page))
 143		goto out_sem;
 144
 145	/* page is wholly or partially inside EOF */
 146	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
 147						i_size_read(inode)) {
 148		loff_t offset;
 149
 150		offset = i_size_read(inode) & ~PAGE_MASK;
 151		zero_user_segment(page, offset, PAGE_SIZE);
 152	}
 153	set_page_dirty(page);
 154	if (!PageUptodate(page))
 155		SetPageUptodate(page);
 156
 157	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
 158	f2fs_update_time(sbi, REQ_TIME);
 159
 160	trace_f2fs_vm_page_mkwrite(page, DATA);
 161out_sem:
 162	up_read(&F2FS_I(inode)->i_mmap_sem);
 163
 164	sb_end_pagefault(inode->i_sb);
 165err:
 166	return block_page_mkwrite_return(err);
 167}
 168
 169static const struct vm_operations_struct f2fs_file_vm_ops = {
 170	.fault		= f2fs_filemap_fault,
 171	.map_pages	= filemap_map_pages,
 172	.page_mkwrite	= f2fs_vm_page_mkwrite,
 173};
 174
 175static int get_parent_ino(struct inode *inode, nid_t *pino)
 176{
 177	struct dentry *dentry;
 178
 179	/*
 180	 * Make sure to get the non-deleted alias.  The alias associated with
 181	 * the open file descriptor being fsync()'ed may be deleted already.
 182	 */
 183	dentry = d_find_alias(inode);
 184	if (!dentry)
 185		return 0;
 186
 187	*pino = parent_ino(dentry);
 188	dput(dentry);
 189	return 1;
 190}
 191
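/*
 * Returns the first reason, if any, why this fsync cannot rely on
 * roll-forward recovery and must trigger a full checkpoint instead:
 * non-regular or compressed files, extra hard links, a stale parent ino,
 * insufficient roll-forward space, fastboot/strict-fsync modes, and so on.
 * CP_NO_NEEDED means syncing node pages alone is enough.
 */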
 192static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
 193{
 194	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 195	enum cp_reason_type cp_reason = CP_NO_NEEDED;
 196
 197	if (!S_ISREG(inode->i_mode))
 198		cp_reason = CP_NON_REGULAR;
 199	else if (f2fs_compressed_file(inode))
 200		cp_reason = CP_COMPRESSED;
 201	else if (inode->i_nlink != 1)
 202		cp_reason = CP_HARDLINK;
 203	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
 204		cp_reason = CP_SB_NEED_CP;
 205	else if (file_wrong_pino(inode))
 206		cp_reason = CP_WRONG_PINO;
 207	else if (!f2fs_space_for_roll_forward(sbi))
 208		cp_reason = CP_NO_SPC_ROLL;
 209	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
 210		cp_reason = CP_NODE_NEED_CP;
 211	else if (test_opt(sbi, FASTBOOT))
 212		cp_reason = CP_FASTBOOT_MODE;
 213	else if (F2FS_OPTION(sbi).active_logs == 2)
 214		cp_reason = CP_SPEC_LOG_NUM;
 215	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
 216		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
 217		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
 218							TRANS_DIR_INO))
 219		cp_reason = CP_RECOVER_DIR;
 220
 221	return cp_reason;
 222}
 223
 224static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
 225{
 226	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
 227	bool ret = false;
  228	/* but we still need to catch any pending inode updates */
 229	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
 230		ret = true;
 231	f2fs_put_page(i, 0);
 232	return ret;
 233}
 234
 235static void try_to_fix_pino(struct inode *inode)
 236{
 237	struct f2fs_inode_info *fi = F2FS_I(inode);
 238	nid_t pino;
 239
 240	down_write(&fi->i_sem);
 241	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
 242			get_parent_ino(inode, &pino)) {
 243		f2fs_i_pino_write(inode, pino);
 244		file_got_pino(inode);
 245	}
 246	up_write(&fi->i_sem);
 247}
 248
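/*
 * Core of fsync/fdatasync: write back dirty data (hinting in-place updates
 * via FI_NEED_IPU for fdatasync or small dirty sets), then either run a
 * full checkpoint when need_do_checkpoint() requires it, or sync this
 * inode's node pages and finally issue a device flush unless
 * fsync_mode=nobarrier.
 */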
 249static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 250						int datasync, bool atomic)
 251{
 252	struct inode *inode = file->f_mapping->host;
 253	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 254	nid_t ino = inode->i_ino;
 255	int ret = 0;
 256	enum cp_reason_type cp_reason = 0;
 257	struct writeback_control wbc = {
 258		.sync_mode = WB_SYNC_ALL,
 259		.nr_to_write = LONG_MAX,
 260		.for_reclaim = 0,
 261	};
 262	unsigned int seq_id = 0;
 263
 264	if (unlikely(f2fs_readonly(inode->i_sb)))
 265		return 0;
 266
 267	trace_f2fs_sync_file_enter(inode);
 268
 269	if (S_ISDIR(inode->i_mode))
 270		goto go_write;
 271
 272	/* if fdatasync is triggered, let's do in-place-update */
 273	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
 274		set_inode_flag(inode, FI_NEED_IPU);
 275	ret = file_write_and_wait_range(file, start, end);
 276	clear_inode_flag(inode, FI_NEED_IPU);
 277
 278	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
 279		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
 280		return ret;
 281	}
 282
 283	/* if the inode is dirty, let's recover all the time */
 284	if (!f2fs_skip_inode_update(inode, datasync)) {
 285		f2fs_write_inode(inode, NULL);
 286		goto go_write;
 287	}
 288
 289	/*
  290	 * if there is no written data, don't waste time writing recovery info.
 291	 */
 292	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
 293			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
 294
 295		/* it may call write_inode just prior to fsync */
 296		if (need_inode_page_update(sbi, ino))
 297			goto go_write;
 298
 299		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
 300				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
 301			goto flush_out;
 302		goto out;
 303	}
 304go_write:
 305	/*
  306	 * Both fdatasync() and fsync() can be recovered from a sudden
  307	 * power-off.
 308	 */
 309	down_read(&F2FS_I(inode)->i_sem);
 310	cp_reason = need_do_checkpoint(inode);
 311	up_read(&F2FS_I(inode)->i_sem);
 312
 313	if (cp_reason) {
 314		/* all the dirty node pages should be flushed for POR */
 315		ret = f2fs_sync_fs(inode->i_sb, 1);
 316
 317		/*
 318		 * We've secured consistency through sync_fs. Following pino
 319		 * will be used only for fsynced inodes after checkpoint.
 320		 */
 321		try_to_fix_pino(inode);
 322		clear_inode_flag(inode, FI_APPEND_WRITE);
 323		clear_inode_flag(inode, FI_UPDATE_WRITE);
 324		goto out;
 325	}
 326sync_nodes:
 327	atomic_inc(&sbi->wb_sync_req[NODE]);
 328	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
 329	atomic_dec(&sbi->wb_sync_req[NODE]);
 330	if (ret)
 331		goto out;
 332
 333	/* if cp_error was enabled, we should avoid infinite loop */
 334	if (unlikely(f2fs_cp_error(sbi))) {
 335		ret = -EIO;
 336		goto out;
 337	}
 338
 339	if (f2fs_need_inode_block_update(sbi, ino)) {
 340		f2fs_mark_inode_dirty_sync(inode, true);
 341		f2fs_write_inode(inode, NULL);
 342		goto sync_nodes;
 343	}
 344
 345	/*
  346	 * For atomic_write, keeping write ordering is enough, so we don't need
  347	 * to wait for node write completion here: the node chain serializes
  348	 * node blocks, and if one node write is reordered we simply see a
  349	 * broken chain, which stops roll-forward recovery. That means we
  350	 * recover either all or none of the node blocks covered by the fsync
  351	 * mark.
 352	 */
 353	if (!atomic) {
 354		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 355		if (ret)
 356			goto out;
 357	}
 358
  359	/* once recovery info is written, we don't need to track this */
 360	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
 361	clear_inode_flag(inode, FI_APPEND_WRITE);
 362flush_out:
 363	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
 364		ret = f2fs_issue_flush(sbi, inode->i_ino);
 365	if (!ret) {
 366		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
 367		clear_inode_flag(inode, FI_UPDATE_WRITE);
 368		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
 369	}
 370	f2fs_update_time(sbi, REQ_TIME);
 371out:
 372	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
 373	return ret;
 374}
 375
 376int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 377{
 378	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
 379		return -EIO;
 380	return f2fs_do_sync_file(file, start, end, datasync, false);
 381}
 382
 383static bool __found_offset(struct address_space *mapping, block_t blkaddr,
 384				pgoff_t index, int whence)
 385{
 386	switch (whence) {
 387	case SEEK_DATA:
 388		if (__is_valid_data_blkaddr(blkaddr))
 389			return true;
 390		if (blkaddr == NEW_ADDR &&
 391		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
 392			return true;
 393		break;
 394	case SEEK_HOLE:
 395		if (blkaddr == NULL_ADDR)
 396			return true;
 397		break;
 398	}
 399	return false;
 400}
 401
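/*
 * Implements SEEK_DATA/SEEK_HOLE by walking the inode's direct node pages
 * and testing each block address with __found_offset(): a NEW_ADDR block
 * only counts as data while its page is dirty in the page cache, and
 * NULL_ADDR counts as a hole.
 */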
 402static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 403{
 404	struct inode *inode = file->f_mapping->host;
 405	loff_t maxbytes = inode->i_sb->s_maxbytes;
 406	struct dnode_of_data dn;
 407	pgoff_t pgofs, end_offset;
 408	loff_t data_ofs = offset;
 409	loff_t isize;
 410	int err = 0;
 411
 412	inode_lock(inode);
 413
 414	isize = i_size_read(inode);
 415	if (offset >= isize)
 416		goto fail;
 417
 418	/* handle inline data case */
 419	if (f2fs_has_inline_data(inode)) {
 420		if (whence == SEEK_HOLE) {
 421			data_ofs = isize;
 422			goto found;
 423		} else if (whence == SEEK_DATA) {
 424			data_ofs = offset;
 425			goto found;
 426		}
 427	}
 428
 429	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 430
 431	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 432		set_new_dnode(&dn, inode, NULL, NULL, 0);
 433		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
 434		if (err && err != -ENOENT) {
 435			goto fail;
 436		} else if (err == -ENOENT) {
  437			/* direct node does not exist */
 438			if (whence == SEEK_DATA) {
 439				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
 440				continue;
 441			} else {
 442				goto found;
 443			}
 444		}
 445
 446		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 447
 448		/* find data/hole in dnode block */
 449		for (; dn.ofs_in_node < end_offset;
 450				dn.ofs_in_node++, pgofs++,
 451				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 452			block_t blkaddr;
 453
 454			blkaddr = f2fs_data_blkaddr(&dn);
 455
 456			if (__is_valid_data_blkaddr(blkaddr) &&
 457				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
 458					blkaddr, DATA_GENERIC_ENHANCE)) {
 459				f2fs_put_dnode(&dn);
 460				goto fail;
 461			}
 462
 463			if (__found_offset(file->f_mapping, blkaddr,
 464							pgofs, whence)) {
 465				f2fs_put_dnode(&dn);
 466				goto found;
 467			}
 468		}
 469		f2fs_put_dnode(&dn);
 470	}
 471
 472	if (whence == SEEK_DATA)
 473		goto fail;
 474found:
 475	if (whence == SEEK_HOLE && data_ofs > isize)
 476		data_ofs = isize;
 477	inode_unlock(inode);
 478	return vfs_setpos(file, data_ofs, maxbytes);
 479fail:
 480	inode_unlock(inode);
 481	return -ENXIO;
 482}
 483
 484static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
 485{
 486	struct inode *inode = file->f_mapping->host;
 487	loff_t maxbytes = inode->i_sb->s_maxbytes;
 488
 489	if (f2fs_compressed_file(inode))
 490		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
 491
 492	switch (whence) {
 493	case SEEK_SET:
 494	case SEEK_CUR:
 495	case SEEK_END:
 496		return generic_file_llseek_size(file, offset, whence,
 497						maxbytes, i_size_read(inode));
 498	case SEEK_DATA:
 499	case SEEK_HOLE:
 500		if (offset < 0)
 501			return -ENXIO;
 502		return f2fs_seek_block(file, offset, whence);
 503	}
 504
 505	return -EINVAL;
 506}
 507
 508static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 509{
 510	struct inode *inode = file_inode(file);
 511
 512	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 513		return -EIO;
 514
 515	if (!f2fs_is_compress_backend_ready(inode))
 516		return -EOPNOTSUPP;
 517
 518	file_accessed(file);
 519	vma->vm_ops = &f2fs_file_vm_ops;
 520	set_inode_flag(inode, FI_MMAP_FILE);
 521	return 0;
 522}
 523
 524static int f2fs_file_open(struct inode *inode, struct file *filp)
 525{
 526	int err = fscrypt_file_open(inode, filp);
 527
 528	if (err)
 529		return err;
 530
 531	if (!f2fs_is_compress_backend_ready(inode))
 532		return -EOPNOTSUPP;
 533
 534	err = fsverity_file_open(inode, filp);
 535	if (err)
 536		return err;
 537
 538	filp->f_mode |= FMODE_NOWAIT;
 539
 540	return dquot_file_open(inode, filp);
 541}
 542
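/*
 * Frees @count block addresses starting at dn->ofs_in_node in the current
 * node page: valid addresses are invalidated, per-cluster compressed block
 * counts are adjusted, and the extent cache and the inode's valid block
 * count are updated for the whole range.
 */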
 543void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 544{
 545	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 546	struct f2fs_node *raw_node;
 547	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
 548	__le32 *addr;
 549	int base = 0;
 550	bool compressed_cluster = false;
 551	int cluster_index = 0, valid_blocks = 0;
 552	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
 553	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
 554
 555	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
 556		base = get_extra_isize(dn->inode);
 557
 558	raw_node = F2FS_NODE(dn->node_page);
 559	addr = blkaddr_in_node(raw_node) + base + ofs;
 560
  561	/* Assumption: truncation starts at a cluster boundary */
 562	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
 563		block_t blkaddr = le32_to_cpu(*addr);
 564
 565		if (f2fs_compressed_file(dn->inode) &&
 566					!(cluster_index & (cluster_size - 1))) {
 567			if (compressed_cluster)
 568				f2fs_i_compr_blocks_update(dn->inode,
 569							valid_blocks, false);
 570			compressed_cluster = (blkaddr == COMPRESS_ADDR);
 571			valid_blocks = 0;
 572		}
 573
 574		if (blkaddr == NULL_ADDR)
 575			continue;
 576
 577		dn->data_blkaddr = NULL_ADDR;
 578		f2fs_set_data_blkaddr(dn);
 579
 580		if (__is_valid_data_blkaddr(blkaddr)) {
 581			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
 582					DATA_GENERIC_ENHANCE))
 583				continue;
 584			if (compressed_cluster)
 585				valid_blocks++;
 586		}
 587
 588		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 589			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
 590
 591		f2fs_invalidate_blocks(sbi, blkaddr);
 592
 593		if (!released || blkaddr != COMPRESS_ADDR)
 594			nr_free++;
 595	}
 596
 597	if (compressed_cluster)
 598		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
 599
 600	if (nr_free) {
 601		pgoff_t fofs;
 602		/*
 603		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
 604		 * we will invalidate all blkaddr in the whole range.
 605		 */
 606		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
 607							dn->inode) + ofs;
 608		f2fs_update_extent_cache_range(dn, fofs, 0, len);
 609		dec_valid_block_count(sbi, dn->inode, nr_free);
 610	}
 611	dn->ofs_in_node = ofs;
 612
 613	f2fs_update_time(sbi, REQ_TIME);
 614	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
 615					 dn->ofs_in_node, nr_free);
 616}
 617
 618void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
 619{
 620	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
 621}
 622
 623static int truncate_partial_data_page(struct inode *inode, u64 from,
 624								bool cache_only)
 625{
 626	loff_t offset = from & (PAGE_SIZE - 1);
 627	pgoff_t index = from >> PAGE_SHIFT;
 628	struct address_space *mapping = inode->i_mapping;
 629	struct page *page;
 630
 631	if (!offset && !cache_only)
 632		return 0;
 633
 634	if (cache_only) {
 635		page = find_lock_page(mapping, index);
 636		if (page && PageUptodate(page))
 637			goto truncate_out;
 638		f2fs_put_page(page, 1);
 639		return 0;
 640	}
 641
 642	page = f2fs_get_lock_data_page(inode, index, true);
 643	if (IS_ERR(page))
 644		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
 645truncate_out:
 646	f2fs_wait_on_page_writeback(page, DATA, true, true);
 647	zero_user(page, offset, PAGE_SIZE - offset);
 648
 649	/* An encrypted inode should have a key and truncate the last page. */
 650	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
 651	if (!cache_only)
 652		set_page_dirty(page);
 653	f2fs_put_page(page, 1);
 654	return 0;
 655}
 656
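/*
 * Drops all blocks at or beyond @from: inline data is trimmed in the inode
 * page directly; otherwise the partially affected direct node is freed with
 * f2fs_truncate_data_blocks_range() and the rest of the node tree with
 * f2fs_truncate_inode_blocks(), before the now-partial last page is zeroed.
 */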
 657int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
 658{
 659	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 660	struct dnode_of_data dn;
 661	pgoff_t free_from;
 662	int count = 0, err = 0;
 663	struct page *ipage;
 664	bool truncate_page = false;
 665
 666	trace_f2fs_truncate_blocks_enter(inode, from);
 667
 668	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
 669
 670	if (free_from >= max_file_blocks(inode))
 671		goto free_partial;
 672
 673	if (lock)
 674		f2fs_lock_op(sbi);
 675
 676	ipage = f2fs_get_node_page(sbi, inode->i_ino);
 677	if (IS_ERR(ipage)) {
 678		err = PTR_ERR(ipage);
 679		goto out;
 680	}
 681
 682	if (f2fs_has_inline_data(inode)) {
 683		f2fs_truncate_inline_inode(inode, ipage, from);
 684		f2fs_put_page(ipage, 1);
 685		truncate_page = true;
 686		goto out;
 687	}
 688
 689	set_new_dnode(&dn, inode, ipage, NULL, 0);
 690	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
 691	if (err) {
 692		if (err == -ENOENT)
 693			goto free_next;
 694		goto out;
 695	}
 696
 697	count = ADDRS_PER_PAGE(dn.node_page, inode);
 698
 699	count -= dn.ofs_in_node;
 700	f2fs_bug_on(sbi, count < 0);
 701
 702	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
 703		f2fs_truncate_data_blocks_range(&dn, count);
 704		free_from += count;
 705	}
 706
 707	f2fs_put_dnode(&dn);
 708free_next:
 709	err = f2fs_truncate_inode_blocks(inode, free_from);
 710out:
 711	if (lock)
 712		f2fs_unlock_op(sbi);
 713free_partial:
  714	/* lastly zero out the partial page at the new EOF */
 715	if (!err)
 716		err = truncate_partial_data_page(inode, from, truncate_page);
 717
 718	trace_f2fs_truncate_blocks_exit(inode, err);
 719	return err;
 720}
 721
 722int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 723{
 724	u64 free_from = from;
 725	int err;
 726
 727#ifdef CONFIG_F2FS_FS_COMPRESSION
 728	/*
 729	 * for compressed file, only support cluster size
 730	 * aligned truncation.
 731	 */
 732	if (f2fs_compressed_file(inode))
 733		free_from = round_up(from,
 734				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
 735#endif
 736
 737	err = f2fs_do_truncate_blocks(inode, free_from, lock);
 738	if (err)
 739		return err;
 740
 741#ifdef CONFIG_F2FS_FS_COMPRESSION
 742	if (from != free_from) {
 743		err = f2fs_truncate_partial_cluster(inode, from, lock);
 744		if (err)
 745			return err;
 746	}
 747#endif
 748
 749	return 0;
 750}
 751
 752int f2fs_truncate(struct inode *inode)
 753{
 754	int err;
 755
 756	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 757		return -EIO;
 758
 759	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 760				S_ISLNK(inode->i_mode)))
 761		return 0;
 762
 763	trace_f2fs_truncate(inode);
 764
 765	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
 766		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
 767		return -EIO;
 768	}
 769
 770	err = dquot_initialize(inode);
 771	if (err)
 772		return err;
 773
 774	/* we should check inline_data size */
 775	if (!f2fs_may_inline_data(inode)) {
 776		err = f2fs_convert_inline_inode(inode);
 777		if (err)
 778			return err;
 779	}
 780
 781	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
 782	if (err)
 783		return err;
 784
 785	inode->i_mtime = inode->i_ctime = current_time(inode);
 786	f2fs_mark_inode_dirty_sync(inode, false);
 787	return 0;
 788}
 789
 790int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
 791		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
 792{
 793	struct inode *inode = d_inode(path->dentry);
 794	struct f2fs_inode_info *fi = F2FS_I(inode);
 795	struct f2fs_inode *ri;
 796	unsigned int flags;
 797
 798	if (f2fs_has_extra_attr(inode) &&
 799			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 800			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 801		stat->result_mask |= STATX_BTIME;
 802		stat->btime.tv_sec = fi->i_crtime.tv_sec;
 803		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
 804	}
 805
 806	flags = fi->i_flags;
 807	if (flags & F2FS_COMPR_FL)
 808		stat->attributes |= STATX_ATTR_COMPRESSED;
 809	if (flags & F2FS_APPEND_FL)
 810		stat->attributes |= STATX_ATTR_APPEND;
 811	if (IS_ENCRYPTED(inode))
 812		stat->attributes |= STATX_ATTR_ENCRYPTED;
 813	if (flags & F2FS_IMMUTABLE_FL)
 814		stat->attributes |= STATX_ATTR_IMMUTABLE;
 815	if (flags & F2FS_NODUMP_FL)
 816		stat->attributes |= STATX_ATTR_NODUMP;
 817	if (IS_VERITY(inode))
 818		stat->attributes |= STATX_ATTR_VERITY;
 819
 820	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
 821				  STATX_ATTR_APPEND |
 822				  STATX_ATTR_ENCRYPTED |
 823				  STATX_ATTR_IMMUTABLE |
 824				  STATX_ATTR_NODUMP |
 825				  STATX_ATTR_VERITY);
 826
 827	generic_fillattr(&init_user_ns, inode, stat);
 828
 829	/* we need to show initial sectors used for inline_data/dentries */
 830	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
 831					f2fs_has_inline_dentry(inode))
 832		stat->blocks += (stat->size + 511) >> 9;
 833
 834	return 0;
 835}
 836
 837#ifdef CONFIG_F2FS_FS_POSIX_ACL
 838static void __setattr_copy(struct user_namespace *mnt_userns,
 839			   struct inode *inode, const struct iattr *attr)
 840{
 841	unsigned int ia_valid = attr->ia_valid;
 842
 843	if (ia_valid & ATTR_UID)
 844		inode->i_uid = attr->ia_uid;
 845	if (ia_valid & ATTR_GID)
 846		inode->i_gid = attr->ia_gid;
 847	if (ia_valid & ATTR_ATIME)
 848		inode->i_atime = attr->ia_atime;
 849	if (ia_valid & ATTR_MTIME)
 850		inode->i_mtime = attr->ia_mtime;
 851	if (ia_valid & ATTR_CTIME)
 852		inode->i_ctime = attr->ia_ctime;
 853	if (ia_valid & ATTR_MODE) {
 854		umode_t mode = attr->ia_mode;
 855		kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
 856
 857		if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
 858			mode &= ~S_ISGID;
 859		set_acl_inode(inode, mode);
 860	}
 861}
 862#else
 863#define __setattr_copy setattr_copy
 864#endif
 865
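/*
 * f2fs ->setattr: after the usual setattr/fscrypt/fsverity checks, uid/gid
 * changes transfer quota under lock_op(), ATTR_SIZE shrinks or extends
 * i_size under i_gc_rwsem[WRITE] and i_mmap_sem, and ATTR_MODE is finished
 * by posix_acl_chmod().
 */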
 866int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 867		 struct iattr *attr)
 868{
 869	struct inode *inode = d_inode(dentry);
 870	int err;
 871
 872	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
 873		return -EIO;
 874
 875	if (unlikely(IS_IMMUTABLE(inode)))
 876		return -EPERM;
 877
 878	if (unlikely(IS_APPEND(inode) &&
 879			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
 880				  ATTR_GID | ATTR_TIMES_SET))))
 881		return -EPERM;
 882
 883	if ((attr->ia_valid & ATTR_SIZE) &&
 884		!f2fs_is_compress_backend_ready(inode))
 885		return -EOPNOTSUPP;
 886
 887	err = setattr_prepare(&init_user_ns, dentry, attr);
 888	if (err)
 889		return err;
 890
 891	err = fscrypt_prepare_setattr(dentry, attr);
 892	if (err)
 893		return err;
 894
 895	err = fsverity_prepare_setattr(dentry, attr);
 896	if (err)
 897		return err;
 898
 899	if (is_quota_modification(inode, attr)) {
 900		err = dquot_initialize(inode);
 901		if (err)
 902			return err;
 903	}
 904	if ((attr->ia_valid & ATTR_UID &&
 905		!uid_eq(attr->ia_uid, inode->i_uid)) ||
 906		(attr->ia_valid & ATTR_GID &&
 907		!gid_eq(attr->ia_gid, inode->i_gid))) {
 908		f2fs_lock_op(F2FS_I_SB(inode));
 909		err = dquot_transfer(inode, attr);
 910		if (err) {
 911			set_sbi_flag(F2FS_I_SB(inode),
 912					SBI_QUOTA_NEED_REPAIR);
 913			f2fs_unlock_op(F2FS_I_SB(inode));
 914			return err;
 915		}
 916		/*
 917		 * update uid/gid under lock_op(), so that dquot and inode can
 918		 * be updated atomically.
 919		 */
 920		if (attr->ia_valid & ATTR_UID)
 921			inode->i_uid = attr->ia_uid;
 922		if (attr->ia_valid & ATTR_GID)
 923			inode->i_gid = attr->ia_gid;
 924		f2fs_mark_inode_dirty_sync(inode, true);
 925		f2fs_unlock_op(F2FS_I_SB(inode));
 926	}
 927
 928	if (attr->ia_valid & ATTR_SIZE) {
 929		loff_t old_size = i_size_read(inode);
 930
 931		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
 932			/*
  933			 * convert the inline inode before i_size_write() so i_size
  934			 * never exceeds the inline_data capacity while the inline flag is set.
 935			 */
 936			err = f2fs_convert_inline_inode(inode);
 937			if (err)
 938				return err;
 939		}
 940
 941		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 942		down_write(&F2FS_I(inode)->i_mmap_sem);
 943
 944		truncate_setsize(inode, attr->ia_size);
 945
 946		if (attr->ia_size <= old_size)
 947			err = f2fs_truncate(inode);
 948		/*
 949		 * do not trim all blocks after i_size if target size is
 950		 * larger than i_size.
 951		 */
 952		up_write(&F2FS_I(inode)->i_mmap_sem);
 953		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 954		if (err)
 955			return err;
 956
 957		spin_lock(&F2FS_I(inode)->i_size_lock);
 958		inode->i_mtime = inode->i_ctime = current_time(inode);
 959		F2FS_I(inode)->last_disk_size = i_size_read(inode);
 960		spin_unlock(&F2FS_I(inode)->i_size_lock);
 961	}
 962
 963	__setattr_copy(&init_user_ns, inode, attr);
 964
 965	if (attr->ia_valid & ATTR_MODE) {
 966		err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));
 967
 968		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
 969			if (!err)
 970				inode->i_mode = F2FS_I(inode)->i_acl_mode;
 971			clear_inode_flag(inode, FI_ACL_MODE);
 972		}
 973	}
 974
  975	/* file size may have changed here */
 976	f2fs_mark_inode_dirty_sync(inode, true);
 977
 978	/* inode change will produce dirty node pages flushed by checkpoint */
 979	f2fs_balance_fs(F2FS_I_SB(inode), true);
 980
 981	return err;
 982}
 983
 984const struct inode_operations f2fs_file_inode_operations = {
 985	.getattr	= f2fs_getattr,
 986	.setattr	= f2fs_setattr,
 987	.get_acl	= f2fs_get_acl,
 988	.set_acl	= f2fs_set_acl,
 989	.listxattr	= f2fs_listxattr,
 990	.fiemap		= f2fs_fiemap,
 991	.fileattr_get	= f2fs_fileattr_get,
 992	.fileattr_set	= f2fs_fileattr_set,
 993};
 994
 995static int fill_zero(struct inode *inode, pgoff_t index,
 996					loff_t start, loff_t len)
 997{
 998	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 999	struct page *page;
1000
1001	if (!len)
1002		return 0;
1003
1004	f2fs_balance_fs(sbi, true);
1005
1006	f2fs_lock_op(sbi);
1007	page = f2fs_get_new_data_page(inode, NULL, index, false);
1008	f2fs_unlock_op(sbi);
1009
1010	if (IS_ERR(page))
1011		return PTR_ERR(page);
1012
1013	f2fs_wait_on_page_writeback(page, DATA, true, true);
1014	zero_user(page, start, len);
1015	set_page_dirty(page);
1016	f2fs_put_page(page, 1);
1017	return 0;
1018}
1019
1020int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1021{
1022	int err;
1023
1024	while (pg_start < pg_end) {
1025		struct dnode_of_data dn;
1026		pgoff_t end_offset, count;
1027
1028		set_new_dnode(&dn, inode, NULL, NULL, 0);
1029		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1030		if (err) {
1031			if (err == -ENOENT) {
1032				pg_start = f2fs_get_next_page_offset(&dn,
1033								pg_start);
1034				continue;
1035			}
1036			return err;
1037		}
1038
1039		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1040		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1041
1042		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1043
1044		f2fs_truncate_data_blocks_range(&dn, count);
1045		f2fs_put_dnode(&dn);
1046
1047		pg_start += count;
1048	}
1049	return 0;
1050}
1051
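/*
 * FALLOC_FL_PUNCH_HOLE: the partial pages at either end are zeroed with
 * fill_zero(), and every whole block in between is dropped through
 * f2fs_truncate_hole() with the page cache truncated under i_gc_rwsem and
 * i_mmap_sem.
 */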
1052static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1053{
1054	pgoff_t pg_start, pg_end;
1055	loff_t off_start, off_end;
1056	int ret;
1057
1058	ret = f2fs_convert_inline_inode(inode);
1059	if (ret)
1060		return ret;
1061
1062	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1063	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1064
1065	off_start = offset & (PAGE_SIZE - 1);
1066	off_end = (offset + len) & (PAGE_SIZE - 1);
1067
1068	if (pg_start == pg_end) {
1069		ret = fill_zero(inode, pg_start, off_start,
1070						off_end - off_start);
1071		if (ret)
1072			return ret;
1073	} else {
1074		if (off_start) {
1075			ret = fill_zero(inode, pg_start++, off_start,
1076						PAGE_SIZE - off_start);
1077			if (ret)
1078				return ret;
1079		}
1080		if (off_end) {
1081			ret = fill_zero(inode, pg_end, 0, off_end);
1082			if (ret)
1083				return ret;
1084		}
1085
1086		if (pg_start < pg_end) {
1087			loff_t blk_start, blk_end;
1088			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1089
1090			f2fs_balance_fs(sbi, true);
1091
1092			blk_start = (loff_t)pg_start << PAGE_SHIFT;
1093			blk_end = (loff_t)pg_end << PAGE_SHIFT;
1094
1095			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1096			down_write(&F2FS_I(inode)->i_mmap_sem);
1097
1098			truncate_pagecache_range(inode, blk_start, blk_end - 1);
1099
1100			f2fs_lock_op(sbi);
1101			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1102			f2fs_unlock_op(sbi);
1103
1104			up_write(&F2FS_I(inode)->i_mmap_sem);
1105			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1106		}
1107	}
1108
1109	return ret;
1110}
1111
1112static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1113				int *do_replace, pgoff_t off, pgoff_t len)
1114{
1115	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1116	struct dnode_of_data dn;
1117	int ret, done, i;
1118
1119next_dnode:
1120	set_new_dnode(&dn, inode, NULL, NULL, 0);
1121	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1122	if (ret && ret != -ENOENT) {
1123		return ret;
1124	} else if (ret == -ENOENT) {
1125		if (dn.max_level == 0)
1126			return -ENOENT;
1127		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1128						dn.ofs_in_node, len);
1129		blkaddr += done;
1130		do_replace += done;
1131		goto next;
1132	}
1133
1134	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1135							dn.ofs_in_node, len);
1136	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1137		*blkaddr = f2fs_data_blkaddr(&dn);
1138
1139		if (__is_valid_data_blkaddr(*blkaddr) &&
1140			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
1141					DATA_GENERIC_ENHANCE)) {
1142			f2fs_put_dnode(&dn);
1143			return -EFSCORRUPTED;
1144		}
1145
1146		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1147
1148			if (f2fs_lfs_mode(sbi)) {
1149				f2fs_put_dnode(&dn);
1150				return -EOPNOTSUPP;
1151			}
1152
1153			/* do not invalidate this block address */
1154			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1155			*do_replace = 1;
1156		}
1157	}
1158	f2fs_put_dnode(&dn);
1159next:
1160	len -= done;
1161	off += done;
1162	if (len)
1163		goto next_dnode;
1164	return 0;
1165}
1166
1167static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1168				int *do_replace, pgoff_t off, int len)
1169{
1170	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1171	struct dnode_of_data dn;
1172	int ret, i;
1173
1174	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1175		if (*do_replace == 0)
1176			continue;
1177
1178		set_new_dnode(&dn, inode, NULL, NULL, 0);
1179		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1180		if (ret) {
1181			dec_valid_block_count(sbi, inode, 1);
1182			f2fs_invalidate_blocks(sbi, *blkaddr);
1183		} else {
1184			f2fs_update_data_blkaddr(&dn, *blkaddr);
1185		}
1186		f2fs_put_dnode(&dn);
1187	}
1188	return 0;
1189}
1190
1191static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1192			block_t *blkaddr, int *do_replace,
1193			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1194{
1195	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1196	pgoff_t i = 0;
1197	int ret;
1198
1199	while (i < len) {
1200		if (blkaddr[i] == NULL_ADDR && !full) {
1201			i++;
1202			continue;
1203		}
1204
1205		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1206			struct dnode_of_data dn;
1207			struct node_info ni;
1208			size_t new_size;
1209			pgoff_t ilen;
1210
1211			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1212			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1213			if (ret)
1214				return ret;
1215
1216			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1217			if (ret) {
1218				f2fs_put_dnode(&dn);
1219				return ret;
1220			}
1221
1222			ilen = min((pgoff_t)
1223				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1224						dn.ofs_in_node, len - i);
1225			do {
1226				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1227				f2fs_truncate_data_blocks_range(&dn, 1);
1228
1229				if (do_replace[i]) {
1230					f2fs_i_blocks_write(src_inode,
1231							1, false, false);
1232					f2fs_i_blocks_write(dst_inode,
1233							1, true, false);
1234					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1235					blkaddr[i], ni.version, true, false);
1236
1237					do_replace[i] = 0;
1238				}
1239				dn.ofs_in_node++;
1240				i++;
1241				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1242				if (dst_inode->i_size < new_size)
1243					f2fs_i_size_write(dst_inode, new_size);
1244			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1245
1246			f2fs_put_dnode(&dn);
1247		} else {
1248			struct page *psrc, *pdst;
1249
1250			psrc = f2fs_get_lock_data_page(src_inode,
1251							src + i, true);
1252			if (IS_ERR(psrc))
1253				return PTR_ERR(psrc);
1254			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1255								true);
1256			if (IS_ERR(pdst)) {
1257				f2fs_put_page(psrc, 1);
1258				return PTR_ERR(pdst);
1259			}
1260			f2fs_copy_page(psrc, pdst);
1261			set_page_dirty(pdst);
1262			f2fs_put_page(pdst, 1);
1263			f2fs_put_page(psrc, 1);
1264
1265			ret = f2fs_truncate_hole(src_inode,
1266						src + i, src + i + 1);
1267			if (ret)
1268				return ret;
1269			i++;
1270		}
1271	}
1272	return 0;
1273}
1274
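/*
 * Moves @len blocks from @src to @dst in chunks of up to four node pages:
 * __read_out_blkaddrs() snapshots (and detaches) the source addresses,
 * __clone_blkaddrs() rebinds or copies them at the destination, and
 * __roll_back_blkaddrs() restores the source on failure. This is the
 * workhorse behind collapse range and insert range.
 */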
1275static int __exchange_data_block(struct inode *src_inode,
1276			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1277			pgoff_t len, bool full)
1278{
1279	block_t *src_blkaddr;
1280	int *do_replace;
1281	pgoff_t olen;
1282	int ret;
1283
1284	while (len) {
1285		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1286
1287		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1288					array_size(olen, sizeof(block_t)),
1289					GFP_NOFS);
1290		if (!src_blkaddr)
1291			return -ENOMEM;
1292
1293		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1294					array_size(olen, sizeof(int)),
1295					GFP_NOFS);
1296		if (!do_replace) {
1297			kvfree(src_blkaddr);
1298			return -ENOMEM;
1299		}
1300
1301		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1302					do_replace, src, olen);
1303		if (ret)
1304			goto roll_back;
1305
1306		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1307					do_replace, src, dst, olen, full);
1308		if (ret)
1309			goto roll_back;
1310
1311		src += olen;
1312		dst += olen;
1313		len -= olen;
1314
1315		kvfree(src_blkaddr);
1316		kvfree(do_replace);
1317	}
1318	return 0;
1319
1320roll_back:
1321	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1322	kvfree(src_blkaddr);
1323	kvfree(do_replace);
1324	return ret;
1325}
1326
1327static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1328{
1329	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1330	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1331	pgoff_t start = offset >> PAGE_SHIFT;
1332	pgoff_t end = (offset + len) >> PAGE_SHIFT;
1333	int ret;
1334
1335	f2fs_balance_fs(sbi, true);
1336
1337	/* avoid gc operation during block exchange */
1338	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1339	down_write(&F2FS_I(inode)->i_mmap_sem);
1340
1341	f2fs_lock_op(sbi);
1342	f2fs_drop_extent_tree(inode);
1343	truncate_pagecache(inode, offset);
1344	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1345	f2fs_unlock_op(sbi);
1346
1347	up_write(&F2FS_I(inode)->i_mmap_sem);
1348	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1349	return ret;
1350}
1351
1352static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1353{
1354	loff_t new_size;
1355	int ret;
1356
1357	if (offset + len >= i_size_read(inode))
1358		return -EINVAL;
1359
1360	/* collapse range should be aligned to block size of f2fs. */
1361	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1362		return -EINVAL;
1363
1364	ret = f2fs_convert_inline_inode(inode);
1365	if (ret)
1366		return ret;
1367
1368	/* write out all dirty pages from offset */
1369	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1370	if (ret)
1371		return ret;
1372
1373	ret = f2fs_do_collapse(inode, offset, len);
1374	if (ret)
1375		return ret;
1376
1377	/* write out all moved pages, if possible */
1378	down_write(&F2FS_I(inode)->i_mmap_sem);
1379	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1380	truncate_pagecache(inode, offset);
1381
1382	new_size = i_size_read(inode) - len;
1383	ret = f2fs_truncate_blocks(inode, new_size, true);
1384	up_write(&F2FS_I(inode)->i_mmap_sem);
1385	if (!ret)
1386		f2fs_i_size_write(inode, new_size);
1387	return ret;
1388}
1389
1390static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1391								pgoff_t end)
1392{
1393	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1394	pgoff_t index = start;
1395	unsigned int ofs_in_node = dn->ofs_in_node;
1396	blkcnt_t count = 0;
1397	int ret;
1398
1399	for (; index < end; index++, dn->ofs_in_node++) {
1400		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1401			count++;
1402	}
1403
1404	dn->ofs_in_node = ofs_in_node;
1405	ret = f2fs_reserve_new_blocks(dn, count);
1406	if (ret)
1407		return ret;
1408
1409	dn->ofs_in_node = ofs_in_node;
1410	for (index = start; index < end; index++, dn->ofs_in_node++) {
1411		dn->data_blkaddr = f2fs_data_blkaddr(dn);
1412		/*
1413		 * f2fs_reserve_new_blocks will not guarantee entire block
1414		 * allocation.
1415		 */
1416		if (dn->data_blkaddr == NULL_ADDR) {
1417			ret = -ENOSPC;
1418			break;
1419		}
1420		if (dn->data_blkaddr != NEW_ADDR) {
1421			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1422			dn->data_blkaddr = NEW_ADDR;
1423			f2fs_set_data_blkaddr(dn);
1424		}
1425	}
1426
1427	f2fs_update_extent_cache_range(dn, start, 0, index - start);
1428
1429	return ret;
1430}
1431
1432static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1433								int mode)
1434{
1435	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1436	struct address_space *mapping = inode->i_mapping;
1437	pgoff_t index, pg_start, pg_end;
1438	loff_t new_size = i_size_read(inode);
1439	loff_t off_start, off_end;
1440	int ret = 0;
1441
1442	ret = inode_newsize_ok(inode, (len + offset));
1443	if (ret)
1444		return ret;
1445
1446	ret = f2fs_convert_inline_inode(inode);
1447	if (ret)
1448		return ret;
1449
1450	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1451	if (ret)
1452		return ret;
1453
1454	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1455	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1456
1457	off_start = offset & (PAGE_SIZE - 1);
1458	off_end = (offset + len) & (PAGE_SIZE - 1);
1459
1460	if (pg_start == pg_end) {
1461		ret = fill_zero(inode, pg_start, off_start,
1462						off_end - off_start);
1463		if (ret)
1464			return ret;
1465
1466		new_size = max_t(loff_t, new_size, offset + len);
1467	} else {
1468		if (off_start) {
1469			ret = fill_zero(inode, pg_start++, off_start,
1470						PAGE_SIZE - off_start);
1471			if (ret)
1472				return ret;
1473
1474			new_size = max_t(loff_t, new_size,
1475					(loff_t)pg_start << PAGE_SHIFT);
1476		}
1477
1478		for (index = pg_start; index < pg_end;) {
1479			struct dnode_of_data dn;
1480			unsigned int end_offset;
1481			pgoff_t end;
1482
1483			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1484			down_write(&F2FS_I(inode)->i_mmap_sem);
1485
1486			truncate_pagecache_range(inode,
1487				(loff_t)index << PAGE_SHIFT,
1488				((loff_t)pg_end << PAGE_SHIFT) - 1);
1489
1490			f2fs_lock_op(sbi);
1491
1492			set_new_dnode(&dn, inode, NULL, NULL, 0);
1493			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1494			if (ret) {
1495				f2fs_unlock_op(sbi);
1496				up_write(&F2FS_I(inode)->i_mmap_sem);
1497				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1498				goto out;
1499			}
1500
1501			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1502			end = min(pg_end, end_offset - dn.ofs_in_node + index);
1503
1504			ret = f2fs_do_zero_range(&dn, index, end);
1505			f2fs_put_dnode(&dn);
1506
1507			f2fs_unlock_op(sbi);
1508			up_write(&F2FS_I(inode)->i_mmap_sem);
1509			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1510
1511			f2fs_balance_fs(sbi, dn.node_changed);
1512
1513			if (ret)
1514				goto out;
1515
1516			index = end;
1517			new_size = max_t(loff_t, new_size,
1518					(loff_t)index << PAGE_SHIFT);
1519		}
1520
1521		if (off_end) {
1522			ret = fill_zero(inode, pg_end, 0, off_end);
1523			if (ret)
1524				goto out;
1525
1526			new_size = max_t(loff_t, new_size, offset + len);
1527		}
1528	}
1529
1530out:
1531	if (new_size > i_size_read(inode)) {
1532		if (mode & FALLOC_FL_KEEP_SIZE)
1533			file_set_keep_isize(inode);
1534		else
1535			f2fs_i_size_write(inode, new_size);
1536	}
1537	return ret;
1538}
1539
1540static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1541{
1542	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1543	pgoff_t nr, pg_start, pg_end, delta, idx;
1544	loff_t new_size;
1545	int ret = 0;
1546
1547	new_size = i_size_read(inode) + len;
1548	ret = inode_newsize_ok(inode, new_size);
1549	if (ret)
1550		return ret;
1551
1552	if (offset >= i_size_read(inode))
1553		return -EINVAL;
1554
1555	/* insert range should be aligned to block size of f2fs. */
1556	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1557		return -EINVAL;
1558
1559	ret = f2fs_convert_inline_inode(inode);
1560	if (ret)
1561		return ret;
1562
1563	f2fs_balance_fs(sbi, true);
1564
1565	down_write(&F2FS_I(inode)->i_mmap_sem);
1566	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1567	up_write(&F2FS_I(inode)->i_mmap_sem);
1568	if (ret)
1569		return ret;
1570
1571	/* write out all dirty pages from offset */
1572	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1573	if (ret)
1574		return ret;
1575
1576	pg_start = offset >> PAGE_SHIFT;
1577	pg_end = (offset + len) >> PAGE_SHIFT;
1578	delta = pg_end - pg_start;
1579	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1580
1581	/* avoid gc operation during block exchange */
1582	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1583	down_write(&F2FS_I(inode)->i_mmap_sem);
1584	truncate_pagecache(inode, offset);
1585
1586	while (!ret && idx > pg_start) {
1587		nr = idx - pg_start;
1588		if (nr > delta)
1589			nr = delta;
1590		idx -= nr;
1591
1592		f2fs_lock_op(sbi);
1593		f2fs_drop_extent_tree(inode);
1594
1595		ret = __exchange_data_block(inode, inode, idx,
1596					idx + delta, nr, false);
1597		f2fs_unlock_op(sbi);
1598	}
1599	up_write(&F2FS_I(inode)->i_mmap_sem);
1600	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1601
1602	/* write out all moved pages, if possible */
1603	down_write(&F2FS_I(inode)->i_mmap_sem);
1604	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1605	truncate_pagecache(inode, offset);
1606	up_write(&F2FS_I(inode)->i_mmap_sem);
1607
1608	if (!ret)
1609		f2fs_i_size_write(inode, new_size);
1610	return ret;
1611}
1612
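/*
 * Plain preallocation for fallocate(): reserves blocks for the range with
 * f2fs_map_blocks(); pinned files are allocated one section at a time from
 * CURSEG_COLD_DATA_PINNED, running foreground GC first when free sections
 * run low.
 */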
1613static int expand_inode_data(struct inode *inode, loff_t offset,
1614					loff_t len, int mode)
1615{
1616	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1617	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1618			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1619			.m_may_create = true };
1620	pgoff_t pg_start, pg_end;
1621	loff_t new_size = i_size_read(inode);
1622	loff_t off_end;
1623	block_t expanded = 0;
1624	int err;
1625
1626	err = inode_newsize_ok(inode, (len + offset));
1627	if (err)
1628		return err;
1629
1630	err = f2fs_convert_inline_inode(inode);
1631	if (err)
1632		return err;
1633
1634	f2fs_balance_fs(sbi, true);
1635
1636	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1637	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1638	off_end = (offset + len) & (PAGE_SIZE - 1);
1639
1640	map.m_lblk = pg_start;
1641	map.m_len = pg_end - pg_start;
1642	if (off_end)
1643		map.m_len++;
1644
1645	if (!map.m_len)
1646		return 0;
1647
1648	if (f2fs_is_pinned_file(inode)) {
1649		block_t sec_blks = BLKS_PER_SEC(sbi);
1650		block_t sec_len = roundup(map.m_len, sec_blks);
1651
1652		map.m_len = sec_blks;
1653next_alloc:
1654		if (has_not_enough_free_secs(sbi, 0,
1655			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1656			down_write(&sbi->gc_lock);
1657			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1658			if (err && err != -ENODATA && err != -EAGAIN)
1659				goto out_err;
1660		}
1661
1662		down_write(&sbi->pin_sem);
1663
1664		f2fs_lock_op(sbi);
1665		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1666		f2fs_unlock_op(sbi);
1667
1668		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1669		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1670
1671		up_write(&sbi->pin_sem);
1672
1673		expanded += map.m_len;
1674		sec_len -= map.m_len;
1675		map.m_lblk += map.m_len;
1676		if (!err && sec_len)
1677			goto next_alloc;
1678
1679		map.m_len = expanded;
1680	} else {
1681		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1682		expanded = map.m_len;
1683	}
1684out_err:
1685	if (err) {
1686		pgoff_t last_off;
1687
1688		if (!expanded)
1689			return err;
1690
1691		last_off = pg_start + expanded - 1;
1692
1693		/* update new size to the failed position */
1694		new_size = (last_off == pg_end) ? offset + len :
1695					(loff_t)(last_off + 1) << PAGE_SHIFT;
1696	} else {
1697		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1698	}
1699
1700	if (new_size > i_size_read(inode)) {
1701		if (mode & FALLOC_FL_KEEP_SIZE)
1702			file_set_keep_isize(inode);
1703		else
1704			f2fs_i_size_write(inode, new_size);
1705	}
1706
1707	return err;
1708}
1709
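/*
 * ->fallocate entry point. Only regular files are supported; encrypted
 * files reject collapse/insert range, and compressed files additionally
 * reject punch hole and zero range. A typical call from user space would
 * be, for example, fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 * offset, len).
 */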
1710static long f2fs_fallocate(struct file *file, int mode,
1711				loff_t offset, loff_t len)
1712{
1713	struct inode *inode = file_inode(file);
1714	long ret = 0;
1715
1716	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1717		return -EIO;
1718	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1719		return -ENOSPC;
1720	if (!f2fs_is_compress_backend_ready(inode))
1721		return -EOPNOTSUPP;
1722
 1723	/* f2fs only supports ->fallocate for regular files */
1724	if (!S_ISREG(inode->i_mode))
1725		return -EINVAL;
1726
1727	if (IS_ENCRYPTED(inode) &&
1728		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1729		return -EOPNOTSUPP;
1730
1731	if (f2fs_compressed_file(inode) &&
1732		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1733			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1734		return -EOPNOTSUPP;
1735
1736	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1737			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1738			FALLOC_FL_INSERT_RANGE))
1739		return -EOPNOTSUPP;
1740
1741	inode_lock(inode);
1742
1743	if (mode & FALLOC_FL_PUNCH_HOLE) {
1744		if (offset >= inode->i_size)
1745			goto out;
1746
1747		ret = punch_hole(inode, offset, len);
1748	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1749		ret = f2fs_collapse_range(inode, offset, len);
1750	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1751		ret = f2fs_zero_range(inode, offset, len, mode);
1752	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1753		ret = f2fs_insert_range(inode, offset, len);
1754	} else {
1755		ret = expand_inode_data(inode, offset, len, mode);
1756	}
1757
1758	if (!ret) {
1759		inode->i_mtime = inode->i_ctime = current_time(inode);
1760		f2fs_mark_inode_dirty_sync(inode, false);
1761		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1762	}
1763
1764out:
1765	inode_unlock(inode);
1766
1767	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1768	return ret;
1769}
1770
1771static int f2fs_release_file(struct inode *inode, struct file *filp)
1772{
1773	/*
 1774	 * f2fs_release_file is called on every close. So we should not drop
 1775	 * any in-memory pages for a close issued by another process.
1776	 */
1777	if (!(filp->f_mode & FMODE_WRITE) ||
1778			atomic_read(&inode->i_writecount) != 1)
1779		return 0;
1780
 1781	/* any remaining atomic pages should be discarded */
1782	if (f2fs_is_atomic_file(inode))
1783		f2fs_drop_inmem_pages(inode);
1784	if (f2fs_is_volatile_file(inode)) {
1785		set_inode_flag(inode, FI_DROP_CACHE);
1786		filemap_fdatawrite(inode->i_mapping);
1787		clear_inode_flag(inode, FI_DROP_CACHE);
1788		clear_inode_flag(inode, FI_VOLATILE_FILE);
1789		stat_dec_volatile_write(inode);
1790	}
1791	return 0;
1792}
1793
1794static int f2fs_file_flush(struct file *file, fl_owner_t id)
1795{
1796	struct inode *inode = file_inode(file);
1797
1798	/*
 1799	 * If the process doing a transaction crashes, we should roll back.
 1800	 * Otherwise, other readers/writers could see a corrupted database
 1801	 * until all writers have closed the file. Since this must happen
 1802	 * before the file lock is dropped, it has to be done in ->flush.
1803	 */
1804	if (f2fs_is_atomic_file(inode) &&
1805			F2FS_I(inode)->inmem_task == current)
1806		f2fs_drop_inmem_pages(inode);
1807	return 0;
1808}
1809
1810static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1811{
1812	struct f2fs_inode_info *fi = F2FS_I(inode);
1813	u32 masked_flags = fi->i_flags & mask;
1814
1815	/* mask can be shrunk by flags_valid selector */
1816	iflags &= mask;
1817
1818	/* Is it quota file? Do not allow user to mess with it */
1819	if (IS_NOQUOTA(inode))
1820		return -EPERM;
1821
1822	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1823		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1824			return -EOPNOTSUPP;
1825		if (!f2fs_empty_dir(inode))
1826			return -ENOTEMPTY;
1827	}
1828
1829	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1830		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1831			return -EOPNOTSUPP;
1832		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1833			return -EINVAL;
1834	}
1835
1836	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1837		if (masked_flags & F2FS_COMPR_FL) {
1838			if (!f2fs_disable_compressed_file(inode))
1839				return -EINVAL;
1840		}
1841		if (iflags & F2FS_NOCOMP_FL)
1842			return -EINVAL;
1843		if (iflags & F2FS_COMPR_FL) {
1844			if (!f2fs_may_compress(inode))
1845				return -EINVAL;
1846			if (S_ISREG(inode->i_mode) && inode->i_size)
1847				return -EINVAL;
1848
1849			set_compress_context(inode);
1850		}
1851	}
1852	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1853		if (masked_flags & F2FS_COMPR_FL)
1854			return -EINVAL;
1855	}
1856
1857	fi->i_flags = iflags | (fi->i_flags & ~mask);
1858	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1859					(fi->i_flags & F2FS_NOCOMP_FL));
1860
1861	if (fi->i_flags & F2FS_PROJINHERIT_FL)
1862		set_inode_flag(inode, FI_PROJ_INHERIT);
1863	else
1864		clear_inode_flag(inode, FI_PROJ_INHERIT);
1865
1866	inode->i_ctime = current_time(inode);
1867	f2fs_set_inode_flags(inode);
1868	f2fs_mark_inode_dirty_sync(inode, true);
1869	return 0;
1870}
1871
1872/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
1873
1874/*
1875 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1876 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1877 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1878 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1879 *
1880 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
1881 * FS_IOC_FSSETXATTR is done by the VFS.
1882 */
1883
1884static const struct {
1885	u32 iflag;
1886	u32 fsflag;
1887} f2fs_fsflags_map[] = {
1888	{ F2FS_COMPR_FL,	FS_COMPR_FL },
1889	{ F2FS_SYNC_FL,		FS_SYNC_FL },
1890	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
1891	{ F2FS_APPEND_FL,	FS_APPEND_FL },
1892	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
1893	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
1894	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
1895	{ F2FS_INDEX_FL,	FS_INDEX_FL },
1896	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
1897	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
1898	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
1899};
1900
1901#define F2FS_GETTABLE_FS_FL (		\
1902		FS_COMPR_FL |		\
1903		FS_SYNC_FL |		\
1904		FS_IMMUTABLE_FL |	\
1905		FS_APPEND_FL |		\
1906		FS_NODUMP_FL |		\
1907		FS_NOATIME_FL |		\
1908		FS_NOCOMP_FL |		\
1909		FS_INDEX_FL |		\
1910		FS_DIRSYNC_FL |		\
1911		FS_PROJINHERIT_FL |	\
1912		FS_ENCRYPT_FL |		\
1913		FS_INLINE_DATA_FL |	\
1914		FS_NOCOW_FL |		\
1915		FS_VERITY_FL |		\
1916		FS_CASEFOLD_FL)
1917
1918#define F2FS_SETTABLE_FS_FL (		\
1919		FS_COMPR_FL |		\
1920		FS_SYNC_FL |		\
1921		FS_IMMUTABLE_FL |	\
1922		FS_APPEND_FL |		\
1923		FS_NODUMP_FL |		\
1924		FS_NOATIME_FL |		\
1925		FS_NOCOMP_FL |		\
1926		FS_DIRSYNC_FL |		\
1927		FS_PROJINHERIT_FL |	\
1928		FS_CASEFOLD_FL)
1929
1930/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1931static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1932{
1933	u32 fsflags = 0;
1934	int i;
1935
1936	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1937		if (iflags & f2fs_fsflags_map[i].iflag)
1938			fsflags |= f2fs_fsflags_map[i].fsflag;
1939
1940	return fsflags;
1941}
1942
1943/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1944static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1945{
1946	u32 iflags = 0;
1947	int i;
1948
1949	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1950		if (fsflags & f2fs_fsflags_map[i].fsflag)
1951			iflags |= f2fs_fsflags_map[i].iflag;
1952
1953	return iflags;
1954}
1955
1956static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1957{
1958	struct inode *inode = file_inode(filp);
1959
1960	return put_user(inode->i_generation, (int __user *)arg);
1961}
1962
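/*
 * F2FS_IOC_START_ATOMIC_WRITE: converts inline data, writes back existing
 * dirty pages, links the inode into sbi->inode_list[ATOMIC_FILE] and sets
 * FI_ATOMIC_FILE, after which writes are staged in memory until the
 * matching F2FS_IOC_COMMIT_ATOMIC_WRITE (used, e.g., by SQLite-style
 * transactional updates).
 */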
1963static int f2fs_ioc_start_atomic_write(struct file *filp)
1964{
1965	struct inode *inode = file_inode(filp);
1966	struct f2fs_inode_info *fi = F2FS_I(inode);
1967	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1968	int ret;
1969
1970	if (!inode_owner_or_capable(&init_user_ns, inode))
1971		return -EACCES;
1972
1973	if (!S_ISREG(inode->i_mode))
1974		return -EINVAL;
1975
1976	if (filp->f_flags & O_DIRECT)
1977		return -EINVAL;
1978
1979	ret = mnt_want_write_file(filp);
1980	if (ret)
1981		return ret;
1982
1983	inode_lock(inode);
1984
1985	f2fs_disable_compressed_file(inode);
1986
1987	if (f2fs_is_atomic_file(inode)) {
1988		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
1989			ret = -EINVAL;
1990		goto out;
1991	}
1992
1993	ret = f2fs_convert_inline_inode(inode);
1994	if (ret)
1995		goto out;
1996
1997	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1998
1999	/*
2000	 * Wait for writeback end_io so that F2FS_WB_CP_DATA is counted
2001	 * correctly with respect to f2fs_is_atomic_file().
2002	 */
2003	if (get_dirty_pages(inode))
2004		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2005			  inode->i_ino, get_dirty_pages(inode));
2006	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2007	if (ret) {
2008		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2009		goto out;
2010	}
2011
2012	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2013	if (list_empty(&fi->inmem_ilist))
2014		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2015	sbi->atomic_files++;
2016	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2017
2018	/* add inode to inmem_list first, then set atomic_file */
2019	set_inode_flag(inode, FI_ATOMIC_FILE);
2020	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2021	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2022
2023	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2024	F2FS_I(inode)->inmem_task = current;
2025	stat_update_max_atomic_write(inode);
2026out:
2027	inode_unlock(inode);
2028	mnt_drop_write_file(filp);
2029	return ret;
2030}
2031
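/*
 * F2FS_IOC_COMMIT_ATOMIC_WRITE: write back the in-memory pages of an atomic
 * file and fsync the result; a pending FI_ATOMIC_REVOKE_REQUEST turns the
 * commit into -EINVAL.
 */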
2032static int f2fs_ioc_commit_atomic_write(struct file *filp)
2033{
2034	struct inode *inode = file_inode(filp);
2035	int ret;
2036
2037	if (!inode_owner_or_capable(&init_user_ns, inode))
2038		return -EACCES;
2039
2040	ret = mnt_want_write_file(filp);
2041	if (ret)
2042		return ret;
2043
2044	f2fs_balance_fs(F2FS_I_SB(inode), true);
2045
2046	inode_lock(inode);
2047
2048	if (f2fs_is_volatile_file(inode)) {
2049		ret = -EINVAL;
2050		goto err_out;
2051	}
2052
2053	if (f2fs_is_atomic_file(inode)) {
2054		ret = f2fs_commit_inmem_pages(inode);
2055		if (ret)
2056			goto err_out;
2057
2058		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2059		if (!ret)
2060			f2fs_drop_inmem_pages(inode);
2061	} else {
2062		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2063	}
2064err_out:
2065	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2066		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2067		ret = -EINVAL;
2068	}
2069	inode_unlock(inode);
2070	mnt_drop_write_file(filp);
2071	return ret;
2072}
2073
2074static int f2fs_ioc_start_volatile_write(struct file *filp)
2075{
2076	struct inode *inode = file_inode(filp);
2077	int ret;
2078
2079	if (!inode_owner_or_capable(&init_user_ns, inode))
2080		return -EACCES;
2081
2082	if (!S_ISREG(inode->i_mode))
2083		return -EINVAL;
2084
2085	ret = mnt_want_write_file(filp);
2086	if (ret)
2087		return ret;
2088
2089	inode_lock(inode);
2090
2091	if (f2fs_is_volatile_file(inode))
2092		goto out;
2093
2094	ret = f2fs_convert_inline_inode(inode);
2095	if (ret)
2096		goto out;
2097
2098	stat_inc_volatile_write(inode);
2099	stat_update_max_volatile_write(inode);
2100
2101	set_inode_flag(inode, FI_VOLATILE_FILE);
2102	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2103out:
2104	inode_unlock(inode);
2105	mnt_drop_write_file(filp);
2106	return ret;
2107}
2108
2109static int f2fs_ioc_release_volatile_write(struct file *filp)
2110{
2111	struct inode *inode = file_inode(filp);
2112	int ret;
2113
2114	if (!inode_owner_or_capable(&init_user_ns, inode))
2115		return -EACCES;
2116
2117	ret = mnt_want_write_file(filp);
2118	if (ret)
2119		return ret;
2120
2121	inode_lock(inode);
2122
2123	if (!f2fs_is_volatile_file(inode))
2124		goto out;
2125
2126	if (!f2fs_is_first_block_written(inode)) {
2127		ret = truncate_partial_data_page(inode, 0, true);
2128		goto out;
2129	}
2130
2131	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2132out:
2133	inode_unlock(inode);
2134	mnt_drop_write_file(filp);
2135	return ret;
2136}
2137
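/*
 * F2FS_IOC_ABORT_VOLATILE_WRITE: drop any in-memory atomic pages and, for a
 * volatile file, clear the volatile state and sync the inode back to disk.
 */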
2138static int f2fs_ioc_abort_volatile_write(struct file *filp)
2139{
2140	struct inode *inode = file_inode(filp);
2141	int ret;
2142
2143	if (!inode_owner_or_capable(&init_user_ns, inode))
2144		return -EACCES;
2145
2146	ret = mnt_want_write_file(filp);
2147	if (ret)
2148		return ret;
2149
2150	inode_lock(inode);
2151
2152	if (f2fs_is_atomic_file(inode))
2153		f2fs_drop_inmem_pages(inode);
2154	if (f2fs_is_volatile_file(inode)) {
2155		clear_inode_flag(inode, FI_VOLATILE_FILE);
2156		stat_dec_volatile_write(inode);
2157		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2158	}
2159
2160	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2161
2162	inode_unlock(inode);
2163
2164	mnt_drop_write_file(filp);
2165	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2166	return ret;
2167}
2168
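/*
 * F2FS_IOC_SHUTDOWN: stop checkpointing with a caller-selected amount of
 * prior syncing (full sync on a frozen bdev, metadata-only checkpoint, no
 * sync, or a meta-page flush), then stop the GC and discard threads.
 */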
2169static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2170{
2171	struct inode *inode = file_inode(filp);
2172	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2173	struct super_block *sb = sbi->sb;
2174	__u32 in;
2175	int ret = 0;
2176
2177	if (!capable(CAP_SYS_ADMIN))
2178		return -EPERM;
2179
2180	if (get_user(in, (__u32 __user *)arg))
2181		return -EFAULT;
2182
2183	if (in != F2FS_GOING_DOWN_FULLSYNC) {
2184		ret = mnt_want_write_file(filp);
2185		if (ret) {
2186			if (ret == -EROFS) {
2187				ret = 0;
2188				f2fs_stop_checkpoint(sbi, false);
2189				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2190				trace_f2fs_shutdown(sbi, in, ret);
2191			}
2192			return ret;
2193		}
2194	}
2195
2196	switch (in) {
2197	case F2FS_GOING_DOWN_FULLSYNC:
2198		ret = freeze_bdev(sb->s_bdev);
2199		if (ret)
2200			goto out;
2201		f2fs_stop_checkpoint(sbi, false);
2202		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2203		thaw_bdev(sb->s_bdev);
2204		break;
2205	case F2FS_GOING_DOWN_METASYNC:
2206		/* do checkpoint only */
2207		ret = f2fs_sync_fs(sb, 1);
2208		if (ret)
2209			goto out;
2210		f2fs_stop_checkpoint(sbi, false);
2211		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2212		break;
2213	case F2FS_GOING_DOWN_NOSYNC:
2214		f2fs_stop_checkpoint(sbi, false);
2215		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2216		break;
2217	case F2FS_GOING_DOWN_METAFLUSH:
2218		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2219		f2fs_stop_checkpoint(sbi, false);
2220		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2221		break;
2222	case F2FS_GOING_DOWN_NEED_FSCK:
2223		set_sbi_flag(sbi, SBI_NEED_FSCK);
2224		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2225		set_sbi_flag(sbi, SBI_IS_DIRTY);
2226		/* do checkpoint only */
2227		ret = f2fs_sync_fs(sb, 1);
2228		goto out;
2229	default:
2230		ret = -EINVAL;
2231		goto out;
2232	}
2233
2234	f2fs_stop_gc_thread(sbi);
2235	f2fs_stop_discard_thread(sbi);
2236
2237	f2fs_drop_discard_cmd(sbi);
2238	clear_opt(sbi, DISCARD);
2239
2240	f2fs_update_time(sbi, REQ_TIME);
2241out:
2242	if (in != F2FS_GOING_DOWN_FULLSYNC)
2243		mnt_drop_write_file(filp);
2244
2245	trace_f2fs_shutdown(sbi, in, ret);
2246
2247	return ret;
2248}
2249
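/*
 * FITRIM: discard free space in the requested range; range.minlen is
 * clamped to the device's discard granularity before f2fs_trim_fs() runs.
 */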
2250static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2251{
2252	struct inode *inode = file_inode(filp);
2253	struct super_block *sb = inode->i_sb;
2254	struct request_queue *q = bdev_get_queue(sb->s_bdev);
2255	struct fstrim_range range;
2256	int ret;
2257
2258	if (!capable(CAP_SYS_ADMIN))
2259		return -EPERM;
2260
2261	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2262		return -EOPNOTSUPP;
2263
2264	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2265				sizeof(range)))
2266		return -EFAULT;
2267
2268	ret = mnt_want_write_file(filp);
2269	if (ret)
2270		return ret;
2271
2272	range.minlen = max((unsigned int)range.minlen,
2273				q->limits.discard_granularity);
2274	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2275	mnt_drop_write_file(filp);
2276	if (ret < 0)
2277		return ret;
2278
2279	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2280				sizeof(range)))
2281		return -EFAULT;
2282	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2283	return 0;
2284}
2285
2286static bool uuid_is_nonzero(__u8 u[16])
2287{
2288	int i;
2289
2290	for (i = 0; i < 16; i++)
2291		if (u[i])
2292			return true;
2293	return false;
2294}
2295
2296static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2297{
2298	struct inode *inode = file_inode(filp);
2299
2300	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2301		return -EOPNOTSUPP;
2302
2303	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2304
2305	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2306}
2307
2308static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2309{
2310	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2311		return -EOPNOTSUPP;
2312	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2313}
2314
2315static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2316{
2317	struct inode *inode = file_inode(filp);
2318	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2319	int err;
2320
2321	if (!f2fs_sb_has_encrypt(sbi))
2322		return -EOPNOTSUPP;
2323
2324	err = mnt_want_write_file(filp);
2325	if (err)
2326		return err;
2327
2328	down_write(&sbi->sb_lock);
2329
2330	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2331		goto got_it;
2332
2333	/* update superblock with uuid */
2334	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2335
2336	err = f2fs_commit_super(sbi, false);
2337	if (err) {
2338		/* undo new data */
2339		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2340		goto out_err;
2341	}
2342got_it:
2343	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2344									16))
2345		err = -EFAULT;
2346out_err:
2347	up_write(&sbi->sb_lock);
2348	mnt_drop_write_file(filp);
2349	return err;
2350}
2351
2352static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2353					     unsigned long arg)
2354{
2355	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2356		return -EOPNOTSUPP;
2357
2358	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2359}
2360
2361static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2362{
2363	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2364		return -EOPNOTSUPP;
2365
2366	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2367}
2368
2369static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2370{
2371	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2372		return -EOPNOTSUPP;
2373
2374	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2375}
2376
2377static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2378						    unsigned long arg)
2379{
2380	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2381		return -EOPNOTSUPP;
2382
2383	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2384}
2385
2386static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2387					      unsigned long arg)
2388{
2389	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2390		return -EOPNOTSUPP;
2391
2392	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2393}
2394
2395static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2396{
2397	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2398		return -EOPNOTSUPP;
2399
2400	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2401}
2402
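/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one round of foreground GC.  In the
 * non-sync case gc_lock is only trylocked, so a busy GC returns -EBUSY.
 */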
2403static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2404{
2405	struct inode *inode = file_inode(filp);
2406	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2407	__u32 sync;
2408	int ret;
2409
2410	if (!capable(CAP_SYS_ADMIN))
2411		return -EPERM;
2412
2413	if (get_user(sync, (__u32 __user *)arg))
2414		return -EFAULT;
2415
2416	if (f2fs_readonly(sbi->sb))
2417		return -EROFS;
2418
2419	ret = mnt_want_write_file(filp);
2420	if (ret)
2421		return ret;
2422
2423	if (!sync) {
2424		if (!down_write_trylock(&sbi->gc_lock)) {
2425			ret = -EBUSY;
2426			goto out;
2427		}
2428	} else {
2429		down_write(&sbi->gc_lock);
2430	}
2431
2432	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2433out:
2434	mnt_drop_write_file(filp);
2435	return ret;
2436}
2437
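/*
 * Run foreground GC section by section over [start, start + len), advancing
 * by BLKS_PER_SEC() per pass until the range is covered or GC fails.
 */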
2438static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2439{
2440	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2441	u64 end;
2442	int ret;
2443
2444	if (!capable(CAP_SYS_ADMIN))
2445		return -EPERM;
2446	if (f2fs_readonly(sbi->sb))
2447		return -EROFS;
2448
2449	end = range->start + range->len;
2450	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2451					end >= MAX_BLKADDR(sbi))
2452		return -EINVAL;
2453
2454	ret = mnt_want_write_file(filp);
2455	if (ret)
2456		return ret;
2457
2458do_more:
2459	if (!range->sync) {
2460		if (!down_write_trylock(&sbi->gc_lock)) {
2461			ret = -EBUSY;
2462			goto out;
2463		}
2464	} else {
2465		down_write(&sbi->gc_lock);
2466	}
2467
2468	ret = f2fs_gc(sbi, range->sync, true, false,
2469				GET_SEGNO(sbi, range->start));
2470	if (ret) {
2471		if (ret == -EBUSY)
2472			ret = -EAGAIN;
2473		goto out;
2474	}
2475	range->start += BLKS_PER_SEC(sbi);
2476	if (range->start <= end)
2477		goto do_more;
2478out:
2479	mnt_drop_write_file(filp);
2480	return ret;
2481}
2482
2483static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2484{
2485	struct f2fs_gc_range range;
2486
2487	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2488							sizeof(range)))
2489		return -EFAULT;
2490	return __f2fs_ioc_gc_range(filp, &range);
2491}
2492
2493static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2494{
2495	struct inode *inode = file_inode(filp);
2496	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2497	int ret;
2498
2499	if (!capable(CAP_SYS_ADMIN))
2500		return -EPERM;
2501
2502	if (f2fs_readonly(sbi->sb))
2503		return -EROFS;
2504
2505	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2506		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2507		return -EINVAL;
2508	}
2509
2510	ret = mnt_want_write_file(filp);
2511	if (ret)
2512		return ret;
2513
2514	ret = f2fs_sync_fs(sbi->sb, 1);
2515
2516	mnt_drop_write_file(filp);
2517	return ret;
2518}
2519
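/*
 * Defragment [start, start + len) by re-dirtying every mapped page so that
 * writeback reallocates the blocks contiguously.  Ranges whose blocks are
 * already contiguous (per the extent cache or block map) are skipped.
 */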
2520static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2521					struct file *filp,
2522					struct f2fs_defragment *range)
2523{
2524	struct inode *inode = file_inode(filp);
2525	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2526					.m_seg_type = NO_CHECK_TYPE,
2527					.m_may_create = false };
2528	struct extent_info ei = {0, 0, 0};
2529	pgoff_t pg_start, pg_end, next_pgofs;
2530	unsigned int blk_per_seg = sbi->blocks_per_seg;
2531	unsigned int total = 0, sec_num;
2532	block_t blk_end = 0;
2533	bool fragmented = false;
2534	int err;
2535
2536	/* if in-place-update policy is enabled, don't waste time here */
2537	if (f2fs_should_update_inplace(inode, NULL))
2538		return -EINVAL;
2539
2540	pg_start = range->start >> PAGE_SHIFT;
2541	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2542
2543	f2fs_balance_fs(sbi, true);
2544
2545	inode_lock(inode);
2546
2547	/* writeback all dirty pages in the range */
2548	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2549						range->start + range->len - 1);
2550	if (err)
2551		goto out;
2552
2553	/*
2554	 * lookup mapping info in extent cache, skip defragmenting if physical
2555	 * block addresses are continuous.
2556	 */
2557	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2558		if (ei.fofs + ei.len >= pg_end)
2559			goto out;
2560	}
2561
2562	map.m_lblk = pg_start;
2563	map.m_next_pgofs = &next_pgofs;
2564
2565	/*
2566	 * lookup mapping info in dnode page cache, skip defragmenting if all
2567	 * physical block addresses are continuous even if there are hole(s)
2568	 * in logical blocks.
2569	 */
2570	while (map.m_lblk < pg_end) {
2571		map.m_len = pg_end - map.m_lblk;
2572		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2573		if (err)
2574			goto out;
2575
2576		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2577			map.m_lblk = next_pgofs;
2578			continue;
2579		}
2580
2581		if (blk_end && blk_end != map.m_pblk)
2582			fragmented = true;
2583
2584		/* record total count of blocks that we're going to move */
2585		total += map.m_len;
2586
2587		blk_end = map.m_pblk + map.m_len;
2588
2589		map.m_lblk += map.m_len;
2590	}
2591
2592	if (!fragmented) {
2593		total = 0;
2594		goto out;
2595	}
2596
2597	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2598
2599	/*
2600	 * make sure there are enough free sections for LFS allocation; this
2601	 * avoids running defragmentation in SSR mode when free sections are
2602	 * being allocated intensively
2603	 */
2604	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2605		err = -EAGAIN;
2606		goto out;
2607	}
2608
2609	map.m_lblk = pg_start;
2610	map.m_len = pg_end - pg_start;
2611	total = 0;
2612
2613	while (map.m_lblk < pg_end) {
2614		pgoff_t idx;
2615		int cnt = 0;
2616
2617do_map:
2618		map.m_len = pg_end - map.m_lblk;
2619		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2620		if (err)
2621			goto clear_out;
2622
2623		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2624			map.m_lblk = next_pgofs;
2625			goto check;
2626		}
2627
2628		set_inode_flag(inode, FI_DO_DEFRAG);
2629
2630		idx = map.m_lblk;
2631		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2632			struct page *page;
2633
2634			page = f2fs_get_lock_data_page(inode, idx, true);
2635			if (IS_ERR(page)) {
2636				err = PTR_ERR(page);
2637				goto clear_out;
2638			}
2639
2640			set_page_dirty(page);
2641			f2fs_put_page(page, 1);
2642
2643			idx++;
2644			cnt++;
2645			total++;
2646		}
2647
2648		map.m_lblk = idx;
2649check:
2650		if (map.m_lblk < pg_end && cnt < blk_per_seg)
2651			goto do_map;
2652
2653		clear_inode_flag(inode, FI_DO_DEFRAG);
2654
2655		err = filemap_fdatawrite(inode->i_mapping);
2656		if (err)
2657			goto out;
2658	}
2659clear_out:
2660	clear_inode_flag(inode, FI_DO_DEFRAG);
2661out:
2662	inode_unlock(inode);
2663	if (!err)
2664		range->len = (u64)total << PAGE_SHIFT;
2665	return err;
2666}
2667
2668static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2669{
2670	struct inode *inode = file_inode(filp);
2671	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2672	struct f2fs_defragment range;
2673	int err;
2674
2675	if (!capable(CAP_SYS_ADMIN))
2676		return -EPERM;
2677
2678	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2679		return -EINVAL;
2680
2681	if (f2fs_readonly(sbi->sb))
2682		return -EROFS;
2683
2684	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2685							sizeof(range)))
2686		return -EFAULT;
2687
2688	/* verify alignment of offset & size */
2689	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2690		return -EINVAL;
2691
2692	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2693					max_file_blocks(inode)))
2694		return -EINVAL;
2695
2696	err = mnt_want_write_file(filp);
2697	if (err)
2698		return err;
2699
2700	err = f2fs_defragment_range(sbi, filp, &range);
2701	mnt_drop_write_file(filp);
2702
2703	f2fs_update_time(sbi, REQ_TIME);
2704	if (err < 0)
2705		return err;
2706
2707	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2708							sizeof(range)))
2709		return -EFAULT;
2710
2711	return 0;
2712}
2713
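/*
 * Move the blocks backing [pos_in, pos_in + len) of file_in to pos_out of
 * file_out by exchanging block pointers.  Both files must be regular and
 * unencrypted, live on the same f2fs instance, and the offsets and length
 * must be block aligned.
 */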
2714static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2715			struct file *file_out, loff_t pos_out, size_t len)
2716{
2717	struct inode *src = file_inode(file_in);
2718	struct inode *dst = file_inode(file_out);
2719	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2720	size_t olen = len, dst_max_i_size = 0;
2721	size_t dst_osize;
2722	int ret;
2723
2724	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2725				src->i_sb != dst->i_sb)
2726		return -EXDEV;
2727
2728	if (unlikely(f2fs_readonly(src->i_sb)))
2729		return -EROFS;
2730
2731	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2732		return -EINVAL;
2733
2734	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2735		return -EOPNOTSUPP;
2736
2737	if (pos_out < 0 || pos_in < 0)
2738		return -EINVAL;
2739
2740	if (src == dst) {
2741		if (pos_in == pos_out)
2742			return 0;
2743		if (pos_out > pos_in && pos_out < pos_in + len)
2744			return -EINVAL;
2745	}
2746
2747	inode_lock(src);
2748	if (src != dst) {
2749		ret = -EBUSY;
2750		if (!inode_trylock(dst))
2751			goto out;
2752	}
2753
2754	ret = -EINVAL;
2755	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2756		goto out_unlock;
2757	if (len == 0)
2758		olen = len = src->i_size - pos_in;
2759	if (pos_in + len == src->i_size)
2760		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2761	if (len == 0) {
2762		ret = 0;
2763		goto out_unlock;
2764	}
2765
2766	dst_osize = dst->i_size;
2767	if (pos_out + olen > dst->i_size)
2768		dst_max_i_size = pos_out + olen;
2769
2770	/* verify the end result is block aligned */
2771	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2772			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2773			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2774		goto out_unlock;
2775
2776	ret = f2fs_convert_inline_inode(src);
2777	if (ret)
2778		goto out_unlock;
2779
2780	ret = f2fs_convert_inline_inode(dst);
2781	if (ret)
2782		goto out_unlock;
2783
2784	/* write out all dirty pages from offset */
2785	ret = filemap_write_and_wait_range(src->i_mapping,
2786					pos_in, pos_in + len);
2787	if (ret)
2788		goto out_unlock;
2789
2790	ret = filemap_write_and_wait_range(dst->i_mapping,
2791					pos_out, pos_out + len);
2792	if (ret)
2793		goto out_unlock;
2794
2795	f2fs_balance_fs(sbi, true);
2796
2797	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2798	if (src != dst) {
2799		ret = -EBUSY;
2800		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2801			goto out_src;
2802	}
2803
2804	f2fs_lock_op(sbi);
2805	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2806				pos_out >> F2FS_BLKSIZE_BITS,
2807				len >> F2FS_BLKSIZE_BITS, false);
2808
2809	if (!ret) {
2810		if (dst_max_i_size)
2811			f2fs_i_size_write(dst, dst_max_i_size);
2812		else if (dst_osize != dst->i_size)
2813			f2fs_i_size_write(dst, dst_osize);
2814	}
2815	f2fs_unlock_op(sbi);
2816
2817	if (src != dst)
2818		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2819out_src:
2820	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2821out_unlock:
2822	if (src != dst)
2823		inode_unlock(dst);
2824out:
2825	inode_unlock(src);
2826	return ret;
2827}
2828
2829static int __f2fs_ioc_move_range(struct file *filp,
2830				struct f2fs_move_range *range)
2831{
2832	struct fd dst;
2833	int err;
2834
2835	if (!(filp->f_mode & FMODE_READ) ||
2836			!(filp->f_mode & FMODE_WRITE))
2837		return -EBADF;
2838
2839	dst = fdget(range->dst_fd);
2840	if (!dst.file)
2841		return -EBADF;
2842
2843	if (!(dst.file->f_mode & FMODE_WRITE)) {
2844		err = -EBADF;
2845		goto err_out;
2846	}
2847
2848	err = mnt_want_write_file(filp);
2849	if (err)
2850		goto err_out;
2851
2852	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2853					range->pos_out, range->len);
2854
2855	mnt_drop_write_file(filp);
2856err_out:
2857	fdput(dst);
2858	return err;
2859}
2860
2861static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2862{
2863	struct f2fs_move_range range;
2864
2865	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2866							sizeof(range)))
2867		return -EFAULT;
2868	return __f2fs_ioc_move_range(filp, &range);
2869}
2870
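/*
 * F2FS_IOC_FLUSH_DEVICE: migrate up to range.segments segments off one
 * device of a multi-device filesystem by forcing GC on them one at a time.
 */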
2871static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2872{
2873	struct inode *inode = file_inode(filp);
2874	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2875	struct sit_info *sm = SIT_I(sbi);
2876	unsigned int start_segno = 0, end_segno = 0;
2877	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2878	struct f2fs_flush_device range;
2879	int ret;
2880
2881	if (!capable(CAP_SYS_ADMIN))
2882		return -EPERM;
2883
2884	if (f2fs_readonly(sbi->sb))
2885		return -EROFS;
2886
2887	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2888		return -EINVAL;
2889
2890	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2891							sizeof(range)))
2892		return -EFAULT;
2893
2894	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2895			__is_large_section(sbi)) {
2896		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2897			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2898		return -EINVAL;
2899	}
2900
2901	ret = mnt_want_write_file(filp);
2902	if (ret)
2903		return ret;
2904
2905	if (range.dev_num != 0)
2906		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2907	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2908
2909	start_segno = sm->last_victim[FLUSH_DEVICE];
2910	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2911		start_segno = dev_start_segno;
2912	end_segno = min(start_segno + range.segments, dev_end_segno);
2913
2914	while (start_segno < end_segno) {
2915		if (!down_write_trylock(&sbi->gc_lock)) {
2916			ret = -EBUSY;
2917			goto out;
2918		}
2919		sm->last_victim[GC_CB] = end_segno + 1;
2920		sm->last_victim[GC_GREEDY] = end_segno + 1;
2921		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2922		ret = f2fs_gc(sbi, true, true, true, start_segno);
2923		if (ret == -EAGAIN)
2924			ret = 0;
2925		else if (ret < 0)
2926			break;
2927		start_segno++;
2928	}
2929out:
2930	mnt_drop_write_file(filp);
2931	return ret;
2932}
2933
2934static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2935{
2936	struct inode *inode = file_inode(filp);
2937	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2938
2939	/* Must validate to set it with SQLite behavior in Android. */
2940	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2941
2942	return put_user(sb_feature, (u32 __user *)arg);
2943}
2944
2945#ifdef CONFIG_QUOTA
2946int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2947{
2948	struct dquot *transfer_to[MAXQUOTAS] = {};
2949	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2950	struct super_block *sb = sbi->sb;
2951	int err = 0;
2952
2953	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2954	if (!IS_ERR(transfer_to[PRJQUOTA])) {
2955		err = __dquot_transfer(inode, transfer_to);
2956		if (err)
2957			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2958		dqput(transfer_to[PRJQUOTA]);
2959	}
2960	return err;
2961}
2962
2963static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2964{
2965	struct f2fs_inode_info *fi = F2FS_I(inode);
2966	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2967	struct page *ipage;
2968	kprojid_t kprojid;
2969	int err;
2970
2971	if (!f2fs_sb_has_project_quota(sbi)) {
2972		if (projid != F2FS_DEF_PROJID)
2973			return -EOPNOTSUPP;
2974		else
2975			return 0;
2976	}
2977
2978	if (!f2fs_has_extra_attr(inode))
2979		return -EOPNOTSUPP;
2980
2981	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2982
2983	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
2984		return 0;
2985
2986	err = -EPERM;
2987	/* Is it quota file? Do not allow user to mess with it */
2988	if (IS_NOQUOTA(inode))
2989		return err;
2990
2991	ipage = f2fs_get_node_page(sbi, inode->i_ino);
2992	if (IS_ERR(ipage))
2993		return PTR_ERR(ipage);
2994
2995	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
2996								i_projid)) {
2997		err = -EOVERFLOW;
2998		f2fs_put_page(ipage, 1);
2999		return err;
3000	}
3001	f2fs_put_page(ipage, 1);
3002
3003	err = dquot_initialize(inode);
3004	if (err)
3005		return err;
3006
3007	f2fs_lock_op(sbi);
3008	err = f2fs_transfer_project_quota(inode, kprojid);
3009	if (err)
3010		goto out_unlock;
3011
3012	F2FS_I(inode)->i_projid = kprojid;
3013	inode->i_ctime = current_time(inode);
3014	f2fs_mark_inode_dirty_sync(inode, true);
3015out_unlock:
3016	f2fs_unlock_op(sbi);
3017	return err;
3018}
3019#else
3020int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3021{
3022	return 0;
3023}
3024
3025static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3026{
3027	if (projid != F2FS_DEF_PROJID)
3028		return -EOPNOTSUPP;
3029	return 0;
3030}
3031#endif
3032
3033int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3034{
3035	struct inode *inode = d_inode(dentry);
3036	struct f2fs_inode_info *fi = F2FS_I(inode);
3037	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3038
3039	if (IS_ENCRYPTED(inode))
3040		fsflags |= FS_ENCRYPT_FL;
3041	if (IS_VERITY(inode))
3042		fsflags |= FS_VERITY_FL;
3043	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3044		fsflags |= FS_INLINE_DATA_FL;
3045	if (is_inode_flag_set(inode, FI_PIN_FILE))
3046		fsflags |= FS_NOCOW_FL;
3047
3048	fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3049
3050	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3051		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3052
3053	return 0;
3054}
3055
3056int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3057		      struct dentry *dentry, struct fileattr *fa)
3058{
3059	struct inode *inode = d_inode(dentry);
3060	u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3061	u32 iflags;
3062	int err;
3063
3064	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3065		return -EIO;
3066	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3067		return -ENOSPC;
3068	if (fsflags & ~F2FS_GETTABLE_FS_FL)
3069		return -EOPNOTSUPP;
3070	fsflags &= F2FS_SETTABLE_FS_FL;
3071	if (!fa->flags_valid)
3072		mask &= FS_COMMON_FL;
3073
3074	iflags = f2fs_fsflags_to_iflags(fsflags);
3075	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3076		return -EOPNOTSUPP;
3077
3078	err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3079	if (!err)
3080		err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3081
3082	return err;
3083}
3084
3085int f2fs_pin_file_control(struct inode *inode, bool inc)
3086{
3087	struct f2fs_inode_info *fi = F2FS_I(inode);
3088	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3089
3090	/* Use i_gc_failures for normal file as a risk signal. */
3091	if (inc)
3092		f2fs_i_gc_failures_write(inode,
3093				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3094
3095	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3096		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3097			  __func__, inode->i_ino,
3098			  fi->i_gc_failures[GC_FAILURE_PIN]);
3099		clear_inode_flag(inode, FI_PIN_FILE);
3100		return -EAGAIN;
3101	}
3102	return 0;
3103}
3104
3105static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3106{
3107	struct inode *inode = file_inode(filp);
3108	__u32 pin;
3109	int ret = 0;
3110
3111	if (get_user(pin, (__u32 __user *)arg))
3112		return -EFAULT;
3113
3114	if (!S_ISREG(inode->i_mode))
3115		return -EINVAL;
3116
3117	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3118		return -EROFS;
3119
3120	ret = mnt_want_write_file(filp);
3121	if (ret)
3122		return ret;
3123
3124	inode_lock(inode);
3125
3126	if (f2fs_should_update_outplace(inode, NULL)) {
3127		ret = -EINVAL;
3128		goto out;
3129	}
3130
3131	if (!pin) {
3132		clear_inode_flag(inode, FI_PIN_FILE);
3133		f2fs_i_gc_failures_write(inode, 0);
3134		goto done;
3135	}
3136
3137	if (f2fs_pin_file_control(inode, false)) {
3138		ret = -EAGAIN;
3139		goto out;
3140	}
3141
3142	ret = f2fs_convert_inline_inode(inode);
3143	if (ret)
3144		goto out;
3145
3146	if (!f2fs_disable_compressed_file(inode)) {
3147		ret = -EOPNOTSUPP;
3148		goto out;
3149	}
3150
3151	set_inode_flag(inode, FI_PIN_FILE);
3152	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3153done:
3154	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3155out:
3156	inode_unlock(inode);
3157	mnt_drop_write_file(filp);
3158	return ret;
3159}
3160
3161static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3162{
3163	struct inode *inode = file_inode(filp);
3164	__u32 pin = 0;
3165
3166	if (is_inode_flag_set(inode, FI_PIN_FILE))
3167		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3168	return put_user(pin, (u32 __user *)arg);
3169}
3170
3171int f2fs_precache_extents(struct inode *inode)
3172{
3173	struct f2fs_inode_info *fi = F2FS_I(inode);
3174	struct f2fs_map_blocks map;
3175	pgoff_t m_next_extent;
3176	loff_t end;
3177	int err;
3178
3179	if (is_inode_flag_set(inode, FI_NO_EXTENT))
3180		return -EOPNOTSUPP;
3181
3182	map.m_lblk = 0;
3183	map.m_next_pgofs = NULL;
3184	map.m_next_extent = &m_next_extent;
3185	map.m_seg_type = NO_CHECK_TYPE;
3186	map.m_may_create = false;
3187	end = max_file_blocks(inode);
3188
3189	while (map.m_lblk < end) {
3190		map.m_len = end - map.m_lblk;
3191
3192		down_write(&fi->i_gc_rwsem[WRITE]);
3193		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3194		up_write(&fi->i_gc_rwsem[WRITE]);
3195		if (err)
3196			return err;
3197
3198		map.m_lblk = m_next_extent;
3199	}
3200
3201	return 0;
3202}
3203
3204static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3205{
3206	return f2fs_precache_extents(file_inode(filp));
3207}
3208
3209static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3210{
3211	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3212	__u64 block_count;
3213
3214	if (!capable(CAP_SYS_ADMIN))
3215		return -EPERM;
3216
3217	if (f2fs_readonly(sbi->sb))
3218		return -EROFS;
3219
3220	if (copy_from_user(&block_count, (void __user *)arg,
3221			   sizeof(block_count)))
3222		return -EFAULT;
3223
3224	return f2fs_resize_fs(sbi, block_count);
3225}
3226
3227static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3228{
3229	struct inode *inode = file_inode(filp);
3230
3231	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3232
3233	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3234		f2fs_warn(F2FS_I_SB(inode),
3235			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3236			  inode->i_ino);
3237		return -EOPNOTSUPP;
3238	}
3239
3240	return fsverity_ioctl_enable(filp, (const void __user *)arg);
3241}
3242
3243static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3244{
3245	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3246		return -EOPNOTSUPP;
3247
3248	return fsverity_ioctl_measure(filp, (void __user *)arg);
3249}
3250
3251static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3252{
3253	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3254		return -EOPNOTSUPP;
3255
3256	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3257}
3258
3259static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3260{
3261	struct inode *inode = file_inode(filp);
3262	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3263	char *vbuf;
3264	int count;
3265	int err = 0;
3266
3267	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3268	if (!vbuf)
3269		return -ENOMEM;
3270
3271	down_read(&sbi->sb_lock);
3272	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3273			ARRAY_SIZE(sbi->raw_super->volume_name),
3274			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3275	up_read(&sbi->sb_lock);
3276
3277	if (copy_to_user((char __user *)arg, vbuf,
3278				min(FSLABEL_MAX, count)))
3279		err = -EFAULT;
3280
3281	kfree(vbuf);
3282	return err;
3283}
3284
3285static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3286{
3287	struct inode *inode = file_inode(filp);
3288	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3289	char *vbuf;
3290	int err = 0;
3291
3292	if (!capable(CAP_SYS_ADMIN))
3293		return -EPERM;
3294
3295	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3296	if (IS_ERR(vbuf))
3297		return PTR_ERR(vbuf);
3298
3299	err = mnt_want_write_file(filp);
3300	if (err)
3301		goto out;
3302
3303	down_write(&sbi->sb_lock);
3304
3305	memset(sbi->raw_super->volume_name, 0,
3306			sizeof(sbi->raw_super->volume_name));
3307	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3308			sbi->raw_super->volume_name,
3309			ARRAY_SIZE(sbi->raw_super->volume_name));
3310
3311	err = f2fs_commit_super(sbi, false);
3312
3313	up_write(&sbi->sb_lock);
3314
3315	mnt_drop_write_file(filp);
3316out:
3317	kfree(vbuf);
3318	return err;
3319}
3320
3321static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3322{
3323	struct inode *inode = file_inode(filp);
3324	__u64 blocks;
3325
3326	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3327		return -EOPNOTSUPP;
3328
3329	if (!f2fs_compressed_file(inode))
3330		return -EINVAL;
3331
3332	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3333	return put_user(blocks, (u64 __user *)arg);
3334}
3335
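/*
 * For each compressed cluster in this dnode, turn the unwritten NEW_ADDR
 * slots back into holes and return how many block reservations were freed.
 */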
3336static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3337{
3338	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3339	unsigned int released_blocks = 0;
3340	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3341	block_t blkaddr;
3342	int i;
3343
3344	for (i = 0; i < count; i++) {
3345		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3346						dn->ofs_in_node + i);
3347
3348		if (!__is_valid_data_blkaddr(blkaddr))
3349			continue;
3350		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3351					DATA_GENERIC_ENHANCE)))
3352			return -EFSCORRUPTED;
3353	}
3354
3355	while (count) {
3356		int compr_blocks = 0;
3357
3358		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3359			blkaddr = f2fs_data_blkaddr(dn);
3360
3361			if (i == 0) {
3362				if (blkaddr == COMPRESS_ADDR)
3363					continue;
3364				dn->ofs_in_node += cluster_size;
3365				goto next;
3366			}
3367
3368			if (__is_valid_data_blkaddr(blkaddr))
3369				compr_blocks++;
3370
3371			if (blkaddr != NEW_ADDR)
3372				continue;
3373
3374			dn->data_blkaddr = NULL_ADDR;
3375			f2fs_set_data_blkaddr(dn);
3376		}
3377
3378		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3379		dec_valid_block_count(sbi, dn->inode,
3380					cluster_size - compr_blocks);
3381
3382		released_blocks += cluster_size - compr_blocks;
3383next:
3384		count -= cluster_size;
3385	}
3386
3387	return released_blocks;
3388}
3389
3390static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3391{
3392	struct inode *inode = file_inode(filp);
3393	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3394	pgoff_t page_idx = 0, last_idx;
3395	unsigned int released_blocks = 0;
3396	int ret;
3397	int writecount;
3398
3399	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3400		return -EOPNOTSUPP;
3401
3402	if (!f2fs_compressed_file(inode))
3403		return -EINVAL;
3404
3405	if (f2fs_readonly(sbi->sb))
3406		return -EROFS;
3407
3408	ret = mnt_want_write_file(filp);
3409	if (ret)
3410		return ret;
3411
3412	f2fs_balance_fs(F2FS_I_SB(inode), true);
3413
3414	inode_lock(inode);
3415
3416	writecount = atomic_read(&inode->i_writecount);
3417	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3418			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
3419		ret = -EBUSY;
3420		goto out;
3421	}
3422
3423	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3424		ret = -EINVAL;
3425		goto out;
3426	}
3427
3428	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3429	if (ret)
3430		goto out;
3431
3432	set_inode_flag(inode, FI_COMPRESS_RELEASED);
3433	inode->i_ctime = current_time(inode);
3434	f2fs_mark_inode_dirty_sync(inode, true);
3435
3436	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3437		goto out;
3438
3439	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3440	down_write(&F2FS_I(inode)->i_mmap_sem);
3441
3442	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3443
3444	while (page_idx < last_idx) {
3445		struct dnode_of_data dn;
3446		pgoff_t end_offset, count;
3447
3448		set_new_dnode(&dn, inode, NULL, NULL, 0);
3449		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3450		if (ret) {
3451			if (ret == -ENOENT) {
3452				page_idx = f2fs_get_next_page_offset(&dn,
3453								page_idx);
3454				ret = 0;
3455				continue;
3456			}
3457			break;
3458		}
3459
3460		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3461		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3462		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3463
3464		ret = release_compress_blocks(&dn, count);
3465
3466		f2fs_put_dnode(&dn);
3467
3468		if (ret < 0)
3469			break;
3470
3471		page_idx += count;
3472		released_blocks += ret;
3473	}
3474
3475	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3476	up_write(&F2FS_I(inode)->i_mmap_sem);
3477out:
3478	inode_unlock(inode);
3479
3480	mnt_drop_write_file(filp);
3481
3482	if (ret >= 0) {
3483		ret = put_user(released_blocks, (u64 __user *)arg);
3484	} else if (released_blocks &&
3485			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3486		set_sbi_flag(sbi, SBI_NEED_FSCK);
3487		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3488			"iblocks=%llu, released=%u, compr_blocks=%u, "
3489			"run fsck to fix.",
3490			__func__, inode->i_ino, inode->i_blocks,
3491			released_blocks,
3492			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3493	}
3494
3495	return ret;
3496}
3497
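/*
 * For each compressed cluster in this dnode, re-reserve NEW_ADDR slots for
 * the holes left by a previous release and return how many blocks were
 * reserved.
 */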
3498static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3499{
3500	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3501	unsigned int reserved_blocks = 0;
3502	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3503	block_t blkaddr;
3504	int i;
3505
3506	for (i = 0; i < count; i++) {
3507		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3508						dn->ofs_in_node + i);
3509
3510		if (!__is_valid_data_blkaddr(blkaddr))
3511			continue;
3512		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3513					DATA_GENERIC_ENHANCE)))
3514			return -EFSCORRUPTED;
3515	}
3516
3517	while (count) {
3518		int compr_blocks = 0;
3519		blkcnt_t reserved;
3520		int ret;
3521
3522		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3523			blkaddr = f2fs_data_blkaddr(dn);
3524
3525			if (i == 0) {
3526				if (blkaddr == COMPRESS_ADDR)
3527					continue;
3528				dn->ofs_in_node += cluster_size;
3529				goto next;
3530			}
3531
3532			if (__is_valid_data_blkaddr(blkaddr)) {
3533				compr_blocks++;
3534				continue;
3535			}
3536
3537			dn->data_blkaddr = NEW_ADDR;
3538			f2fs_set_data_blkaddr(dn);
3539		}
3540
3541		reserved = cluster_size - compr_blocks;
3542		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3543		if (ret)
3544			return ret;
3545
3546		if (reserved != cluster_size - compr_blocks)
3547			return -ENOSPC;
3548
3549		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3550
3551		reserved_blocks += reserved;
3552next:
3553		count -= cluster_size;
3554	}
3555
3556	return reserved_blocks;
3557}
3558
3559static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3560{
3561	struct inode *inode = file_inode(filp);
3562	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3563	pgoff_t page_idx = 0, last_idx;
3564	unsigned int reserved_blocks = 0;
3565	int ret;
3566
3567	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3568		return -EOPNOTSUPP;
3569
3570	if (!f2fs_compressed_file(inode))
3571		return -EINVAL;
3572
3573	if (f2fs_readonly(sbi->sb))
3574		return -EROFS;
3575
3576	ret = mnt_want_write_file(filp);
3577	if (ret)
3578		return ret;
3579
3580	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3581		goto out;
3582
3583	f2fs_balance_fs(F2FS_I_SB(inode), true);
3584
3585	inode_lock(inode);
3586
3587	if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3588		ret = -EINVAL;
3589		goto unlock_inode;
3590	}
3591
3592	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3593	down_write(&F2FS_I(inode)->i_mmap_sem);
3594
3595	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3596
3597	while (page_idx < last_idx) {
3598		struct dnode_of_data dn;
3599		pgoff_t end_offset, count;
3600
3601		set_new_dnode(&dn, inode, NULL, NULL, 0);
3602		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3603		if (ret) {
3604			if (ret == -ENOENT) {
3605				page_idx = f2fs_get_next_page_offset(&dn,
3606								page_idx);
3607				ret = 0;
3608				continue;
3609			}
3610			break;
3611		}
3612
3613		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3614		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3615		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3616
3617		ret = reserve_compress_blocks(&dn, count);
3618
3619		f2fs_put_dnode(&dn);
3620
3621		if (ret < 0)
3622			break;
3623
3624		page_idx += count;
3625		reserved_blocks += ret;
3626	}
3627
3628	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3629	up_write(&F2FS_I(inode)->i_mmap_sem);
3630
3631	if (ret >= 0) {
3632		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3633		inode->i_ctime = current_time(inode);
3634		f2fs_mark_inode_dirty_sync(inode, true);
3635	}
3636unlock_inode:
3637	inode_unlock(inode);
3638out:
3639	mnt_drop_write_file(filp);
3640
3641	if (ret >= 0) {
3642		ret = put_user(reserved_blocks, (u64 __user *)arg);
3643	} else if (reserved_blocks &&
3644			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3645		set_sbi_flag(sbi, SBI_NEED_FSCK);
3646		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3647			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
3648			"run fsck to fix.",
3649			__func__, inode->i_ino, inode->i_blocks,
3650			reserved_blocks,
3651			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3652	}
3653
3654	return ret;
3655}
3656
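/*
 * Discard (secure erase when the queue supports it) and/or zero out one
 * physically contiguous extent; encrypted inodes are zeroed through
 * fscrypt_zeroout_range().
 */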
3657static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3658		pgoff_t off, block_t block, block_t len, u32 flags)
3659{
3660	struct request_queue *q = bdev_get_queue(bdev);
3661	sector_t sector = SECTOR_FROM_BLOCK(block);
3662	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3663	int ret = 0;
3664
3665	if (!q)
3666		return -ENXIO;
3667
3668	if (flags & F2FS_TRIM_FILE_DISCARD)
3669		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3670						blk_queue_secure_erase(q) ?
3671						BLKDEV_DISCARD_SECURE : 0);
3672
3673	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3674		if (IS_ENCRYPTED(inode))
3675			ret = fscrypt_zeroout_range(inode, off, block, len);
3676		else
3677			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3678					GFP_NOFS, 0);
3679	}
3680
3681	return ret;
3682}
3683
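/*
 * F2FS_IOC_SEC_TRIM_FILE: discard and/or zero out the on-disk blocks
 * backing the requested byte range, merging physically contiguous runs per
 * device before calling f2fs_secure_erase().
 */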
3684static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3685{
3686	struct inode *inode = file_inode(filp);
3687	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3688	struct address_space *mapping = inode->i_mapping;
3689	struct block_device *prev_bdev = NULL;
3690	struct f2fs_sectrim_range range;
3691	pgoff_t index, pg_end, prev_index = 0;
3692	block_t prev_block = 0, len = 0;
3693	loff_t end_addr;
3694	bool to_end = false;
3695	int ret = 0;
3696
3697	if (!(filp->f_mode & FMODE_WRITE))
3698		return -EBADF;
3699
3700	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3701				sizeof(range)))
3702		return -EFAULT;
3703
3704	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3705			!S_ISREG(inode->i_mode))
3706		return -EINVAL;
3707
3708	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3709			!f2fs_hw_support_discard(sbi)) ||
3710			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3711			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3712		return -EOPNOTSUPP;
3713
3714	file_start_write(filp);
3715	inode_lock(inode);
3716
3717	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3718			range.start >= inode->i_size) {
3719		ret = -EINVAL;
3720		goto err;
3721	}
3722
3723	if (range.len == 0)
3724		goto err;
3725
3726	if (inode->i_size - range.start > range.len) {
3727		end_addr = range.start + range.len;
3728	} else {
3729		end_addr = range.len == (u64)-1 ?
3730			sbi->sb->s_maxbytes : inode->i_size;
3731		to_end = true;
3732	}
3733
3734	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3735			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3736		ret = -EINVAL;
3737		goto err;
3738	}
3739
3740	index = F2FS_BYTES_TO_BLK(range.start);
3741	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3742
3743	ret = f2fs_convert_inline_inode(inode);
3744	if (ret)
3745		goto err;
3746
3747	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3748	down_write(&F2FS_I(inode)->i_mmap_sem);
3749
3750	ret = filemap_write_and_wait_range(mapping, range.start,
3751			to_end ? LLONG_MAX : end_addr - 1);
3752	if (ret)
3753		goto out;
3754
3755	truncate_inode_pages_range(mapping, range.start,
3756			to_end ? -1 : end_addr - 1);
3757
3758	while (index < pg_end) {
3759		struct dnode_of_data dn;
3760		pgoff_t end_offset, count;
3761		int i;
3762
3763		set_new_dnode(&dn, inode, NULL, NULL, 0);
3764		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3765		if (ret) {
3766			if (ret == -ENOENT) {
3767				index = f2fs_get_next_page_offset(&dn, index);
3768				continue;
3769			}
3770			goto out;
3771		}
3772
3773		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3774		count = min(end_offset - dn.ofs_in_node, pg_end - index);
3775		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3776			struct block_device *cur_bdev;
3777			block_t blkaddr = f2fs_data_blkaddr(&dn);
3778
3779			if (!__is_valid_data_blkaddr(blkaddr))
3780				continue;
3781
3782			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3783						DATA_GENERIC_ENHANCE)) {
3784				ret = -EFSCORRUPTED;
3785				f2fs_put_dnode(&dn);
3786				goto out;
3787			}
3788
3789			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3790			if (f2fs_is_multi_device(sbi)) {
3791				int di = f2fs_target_device_index(sbi, blkaddr);
3792
3793				blkaddr -= FDEV(di).start_blk;
3794			}
3795
3796			if (len) {
3797				if (prev_bdev == cur_bdev &&
3798						index == prev_index + len &&
3799						blkaddr == prev_block + len) {
3800					len++;
3801				} else {
3802					ret = f2fs_secure_erase(prev_bdev,
3803						inode, prev_index, prev_block,
3804						len, range.flags);
3805					if (ret) {
3806						f2fs_put_dnode(&dn);
3807						goto out;
3808					}
3809
3810					len = 0;
3811				}
3812			}
3813
3814			if (!len) {
3815				prev_bdev = cur_bdev;
3816				prev_index = index;
3817				prev_block = blkaddr;
3818				len = 1;
3819			}
3820		}
3821
3822		f2fs_put_dnode(&dn);
3823
3824		if (fatal_signal_pending(current)) {
3825			ret = -EINTR;
3826			goto out;
3827		}
3828		cond_resched();
3829	}
3830
3831	if (len)
3832		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3833				prev_block, len, range.flags);
3834out:
3835	up_write(&F2FS_I(inode)->i_mmap_sem);
3836	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3837err:
3838	inode_unlock(inode);
3839	file_end_write(filp);
3840
3841	return ret;
3842}
3843
3844static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3845{
3846	struct inode *inode = file_inode(filp);
3847	struct f2fs_comp_option option;
3848
3849	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3850		return -EOPNOTSUPP;
3851
3852	inode_lock_shared(inode);
3853
3854	if (!f2fs_compressed_file(inode)) {
3855		inode_unlock_shared(inode);
3856		return -ENODATA;
3857	}
3858
3859	option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3860	option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3861
3862	inode_unlock_shared(inode);
3863
3864	if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3865				sizeof(option)))
3866		return -EFAULT;
3867
3868	return 0;
3869}
3870
3871static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3872{
3873	struct inode *inode = file_inode(filp);
3874	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3875	struct f2fs_comp_option option;
3876	int ret = 0;
3877
3878	if (!f2fs_sb_has_compression(sbi))
3879		return -EOPNOTSUPP;
3880
3881	if (!(filp->f_mode & FMODE_WRITE))
3882		return -EBADF;
3883
3884	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3885				sizeof(option)))
3886		return -EFAULT;
3887
3888	if (!f2fs_compressed_file(inode) ||
3889			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3890			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3891			option.algorithm >= COMPRESS_MAX)
3892		return -EINVAL;
3893
3894	file_start_write(filp);
3895	inode_lock(inode);
3896
3897	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3898		ret = -EBUSY;
3899		goto out;
3900	}
3901
3902	if (inode->i_size != 0) {
3903		ret = -EFBIG;
3904		goto out;
3905	}
3906
3907	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3908	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3909	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3910	f2fs_mark_inode_dirty_sync(inode, true);
3911
3912	if (!f2fs_is_compress_backend_ready(inode))
3913		f2fs_warn(sbi, "compression algorithm is successfully set, "
3914			"but current kernel doesn't support this algorithm.");
3915out:
3916	inode_unlock(inode);
3917	file_end_write(filp);
3918
3919	return ret;
3920}
3921
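/*
 * Read 'len' pages starting at page_idx into the page cache and mark them
 * dirty again so that writeback rewrites (and hence re-compresses or
 * decompresses) the cluster.
 */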
3922static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3923{
3924	DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3925	struct address_space *mapping = inode->i_mapping;
3926	struct page *page;
3927	pgoff_t redirty_idx = page_idx;
3928	int i, page_len = 0, ret = 0;
3929
3930	page_cache_ra_unbounded(&ractl, len, 0);
3931
3932	for (i = 0; i < len; i++, page_idx++) {
3933		page = read_cache_page(mapping, page_idx, NULL, NULL);
3934		if (IS_ERR(page)) {
3935			ret = PTR_ERR(page);
3936			break;
3937		}
3938		page_len++;
3939	}
3940
3941	for (i = 0; i < page_len; i++, redirty_idx++) {
3942		page = find_lock_page(mapping, redirty_idx);
3943		if (!page) {
3944			ret = -ENOMEM;
3945			break;
3946		}
3947		set_page_dirty(page);
3948		f2fs_put_page(page, 1);
3949		f2fs_put_page(page, 0);
3950	}
3951
3952	return ret;
3953}
3954
3955static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3956{
3957	struct inode *inode = file_inode(filp);
3958	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3959	struct f2fs_inode_info *fi = F2FS_I(inode);
3960	pgoff_t page_idx = 0, last_idx;
3961	unsigned int blk_per_seg = sbi->blocks_per_seg;
3962	int cluster_size = F2FS_I(inode)->i_cluster_size;
3963	int count, ret;
3964
3965	if (!f2fs_sb_has_compression(sbi) ||
3966			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3967		return -EOPNOTSUPP;
3968
3969	if (!(filp->f_mode & FMODE_WRITE))
3970		return -EBADF;
3971
3972	if (!f2fs_compressed_file(inode))
3973		return -EINVAL;
3974
3975	f2fs_balance_fs(F2FS_I_SB(inode), true);
3976
3977	file_start_write(filp);
3978	inode_lock(inode);
3979
3980	if (!f2fs_is_compress_backend_ready(inode)) {
3981		ret = -EOPNOTSUPP;
3982		goto out;
3983	}
3984
3985	if (f2fs_is_mmap_file(inode)) {
3986		ret = -EBUSY;
3987		goto out;
3988	}
3989
3990	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3991	if (ret)
3992		goto out;
3993
3994	if (!atomic_read(&fi->i_compr_blocks))
3995		goto out;
3996
3997	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3998
3999	count = last_idx - page_idx;
4000	while (count) {
4001		int len = min(cluster_size, count);
4002
4003		ret = redirty_blocks(inode, page_idx, len);
4004		if (ret < 0)
4005			break;
4006
4007		if (get_dirty_pages(inode) >= blk_per_seg)
4008			filemap_fdatawrite(inode->i_mapping);
4009
4010		count -= len;
4011		page_idx += len;
4012	}
4013
4014	if (!ret)
4015		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4016							LLONG_MAX);
4017
4018	if (ret)
4019		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4020			  __func__, ret);
4021out:
4022	inode_unlock(inode);
4023	file_end_write(filp);
4024
4025	return ret;
4026}
4027
4028static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4029{
4030	struct inode *inode = file_inode(filp);
4031	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4032	pgoff_t page_idx = 0, last_idx;
4033	unsigned int blk_per_seg = sbi->blocks_per_seg;
4034	int cluster_size = F2FS_I(inode)->i_cluster_size;
4035	int count, ret;
4036
4037	if (!f2fs_sb_has_compression(sbi) ||
4038			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4039		return -EOPNOTSUPP;
4040
4041	if (!(filp->f_mode & FMODE_WRITE))
4042		return -EBADF;
4043
4044	if (!f2fs_compressed_file(inode))
4045		return -EINVAL;
4046
4047	f2fs_balance_fs(F2FS_I_SB(inode), true);
4048
4049	file_start_write(filp);
4050	inode_lock(inode);
4051
4052	if (!f2fs_is_compress_backend_ready(inode)) {
4053		ret = -EOPNOTSUPP;
4054		goto out;
4055	}
4056
4057	if (f2fs_is_mmap_file(inode)) {
4058		ret = -EBUSY;
4059		goto out;
4060	}
4061
4062	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4063	if (ret)
4064		goto out;
4065
4066	set_inode_flag(inode, FI_ENABLE_COMPRESS);
4067
4068	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4069
4070	count = last_idx - page_idx;
4071	while (count) {
4072		int len = min(cluster_size, count);
4073
4074		ret = redirty_blocks(inode, page_idx, len);
4075		if (ret < 0)
4076			break;
4077
4078		if (get_dirty_pages(inode) >= blk_per_seg)
4079			filemap_fdatawrite(inode->i_mapping);
4080
4081		count -= len;
4082		page_idx += len;
4083	}
4084
4085	if (!ret)
4086		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4087							LLONG_MAX);
4088
4089	clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4090
4091	if (ret)
4092		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4093			  __func__, ret);
4094out:
4095	inode_unlock(inode);
4096	file_end_write(filp);
4097
4098	return ret;
4099}
4100
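/*
 * Common dispatcher shared by the native and compat ioctl entry points:
 * route each supported command to its handler, -ENOTTY for anything else.
 */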
4101static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4102{
4103	switch (cmd) {
4104	case FS_IOC_GETVERSION:
4105		return f2fs_ioc_getversion(filp, arg);
4106	case F2FS_IOC_START_ATOMIC_WRITE:
4107		return f2fs_ioc_start_atomic_write(filp);
4108	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4109		return f2fs_ioc_commit_atomic_write(filp);
4110	case F2FS_IOC_START_VOLATILE_WRITE:
4111		return f2fs_ioc_start_volatile_write(filp);
4112	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4113		return f2fs_ioc_release_volatile_write(filp);
4114	case F2FS_IOC_ABORT_VOLATILE_WRITE:
4115		return f2fs_ioc_abort_volatile_write(filp);
4116	case F2FS_IOC_SHUTDOWN:
4117		return f2fs_ioc_shutdown(filp, arg);
4118	case FITRIM:
4119		return f2fs_ioc_fitrim(filp, arg);
4120	case FS_IOC_SET_ENCRYPTION_POLICY:
4121		return f2fs_ioc_set_encryption_policy(filp, arg);
4122	case FS_IOC_GET_ENCRYPTION_POLICY:
4123		return f2fs_ioc_get_encryption_policy(filp, arg);
4124	case FS_IOC_GET_ENCRYPTION_PWSALT:
4125		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4126	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4127		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4128	case FS_IOC_ADD_ENCRYPTION_KEY:
4129		return f2fs_ioc_add_encryption_key(filp, arg);
4130	case FS_IOC_REMOVE_ENCRYPTION_KEY:
4131		return f2fs_ioc_remove_encryption_key(filp, arg);
4132	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4133		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4134	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4135		return f2fs_ioc_get_encryption_key_status(filp, arg);
4136	case FS_IOC_GET_ENCRYPTION_NONCE:
4137		return f2fs_ioc_get_encryption_nonce(filp, arg);
4138	case F2FS_IOC_GARBAGE_COLLECT:
4139		return f2fs_ioc_gc(filp, arg);
4140	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4141		return f2fs_ioc_gc_range(filp, arg);
4142	case F2FS_IOC_WRITE_CHECKPOINT:
4143		return f2fs_ioc_write_checkpoint(filp, arg);
4144	case F2FS_IOC_DEFRAGMENT:
4145		return f2fs_ioc_defragment(filp, arg);
4146	case F2FS_IOC_MOVE_RANGE:
4147		return f2fs_ioc_move_range(filp, arg);
4148	case F2FS_IOC_FLUSH_DEVICE:
4149		return f2fs_ioc_flush_device(filp, arg);
4150	case F2FS_IOC_GET_FEATURES:
4151		return f2fs_ioc_get_features(filp, arg);
4152	case F2FS_IOC_GET_PIN_FILE:
4153		return f2fs_ioc_get_pin_file(filp, arg);
4154	case F2FS_IOC_SET_PIN_FILE:
4155		return f2fs_ioc_set_pin_file(filp, arg);
4156	case F2FS_IOC_PRECACHE_EXTENTS:
4157		return f2fs_ioc_precache_extents(filp, arg);
4158	case F2FS_IOC_RESIZE_FS:
4159		return f2fs_ioc_resize_fs(filp, arg);
4160	case FS_IOC_ENABLE_VERITY:
4161		return f2fs_ioc_enable_verity(filp, arg);
4162	case FS_IOC_MEASURE_VERITY:
4163		return f2fs_ioc_measure_verity(filp, arg);
4164	case FS_IOC_READ_VERITY_METADATA:
4165		return f2fs_ioc_read_verity_metadata(filp, arg);
4166	case FS_IOC_GETFSLABEL:
4167		return f2fs_ioc_getfslabel(filp, arg);
4168	case FS_IOC_SETFSLABEL:
4169		return f2fs_ioc_setfslabel(filp, arg);
4170	case F2FS_IOC_GET_COMPRESS_BLOCKS:
4171		return f2fs_get_compress_blocks(filp, arg);
4172	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4173		return f2fs_release_compress_blocks(filp, arg);
4174	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4175		return f2fs_reserve_compress_blocks(filp, arg);
4176	case F2FS_IOC_SEC_TRIM_FILE:
4177		return f2fs_sec_trim_file(filp, arg);
4178	case F2FS_IOC_GET_COMPRESS_OPTION:
4179		return f2fs_ioc_get_compress_option(filp, arg);
4180	case F2FS_IOC_SET_COMPRESS_OPTION:
4181		return f2fs_ioc_set_compress_option(filp, arg);
4182	case F2FS_IOC_DECOMPRESS_FILE:
4183		return f2fs_ioc_decompress_file(filp, arg);
4184	case F2FS_IOC_COMPRESS_FILE:
4185		return f2fs_ioc_compress_file(filp, arg);
4186	default:
4187		return -ENOTTY;
4188	}
4189}
4190
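/*
 * Native ioctl entry point: bail out early with -EIO after a checkpoint
 * error or with -ENOSPC while a checkpoint cannot be guaranteed, otherwise
 * hand the command to the common dispatcher.
 */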
4191long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4192{
4193	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4194		return -EIO;
4195	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4196		return -ENOSPC;
4197
4198	return __f2fs_ioctl(filp, cmd, arg);
4199}
4200
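/*
 * ->read_iter: refuse the read if the compression backend is unavailable,
 * otherwise use the generic path and account the bytes read for iostat.
 */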
4201static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4202{
4203	struct file *file = iocb->ki_filp;
4204	struct inode *inode = file_inode(file);
4205	int ret;
4206
4207	if (!f2fs_is_compress_backend_ready(inode))
4208		return -EOPNOTSUPP;
4209
4210	ret = generic_file_read_iter(iocb, iter);
4211
4212	if (ret > 0)
4213		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4214
4215	return ret;
4216}
4217
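/*
 * ->write_iter: after the generic checks, preallocate blocks for the write
 * unless this is a NOWAIT overwrite or preallocation was disabled, perform
 * the buffered or direct write, and truncate preallocated blocks that a
 * short write left beyond the new i_size.
 */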
4218static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4219{
4220	struct file *file = iocb->ki_filp;
4221	struct inode *inode = file_inode(file);
4222	ssize_t ret;
4223
4224	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4225		ret = -EIO;
4226		goto out;
4227	}
4228
4229	if (!f2fs_is_compress_backend_ready(inode)) {
4230		ret = -EOPNOTSUPP;
4231		goto out;
4232	}
4233
4234	if (iocb->ki_flags & IOCB_NOWAIT) {
4235		if (!inode_trylock(inode)) {
4236			ret = -EAGAIN;
4237			goto out;
4238		}
4239	} else {
4240		inode_lock(inode);
4241	}
4242
4243	if (unlikely(IS_IMMUTABLE(inode))) {
4244		ret = -EPERM;
4245		goto unlock;
4246	}
4247
4248	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4249		ret = -EPERM;
4250		goto unlock;
4251	}
4252
4253	ret = generic_write_checks(iocb, from);
4254	if (ret > 0) {
4255		bool preallocated = false;
4256		size_t target_size = 0;
4257		int err;
4258
4259		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4260			set_inode_flag(inode, FI_NO_PREALLOC);
4261
4262		if ((iocb->ki_flags & IOCB_NOWAIT)) {
4263			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4264						iov_iter_count(from)) ||
4265				f2fs_has_inline_data(inode) ||
4266				f2fs_force_buffered_io(inode, iocb, from)) {
4267				clear_inode_flag(inode, FI_NO_PREALLOC);
4268				inode_unlock(inode);
4269				ret = -EAGAIN;
4270				goto out;
4271			}
4272			goto write;
4273		}
4274
4275		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4276			goto write;
4277
4278		if (iocb->ki_flags & IOCB_DIRECT) {
4279			/*
4280			 * Convert inline data for Direct I/O before entering
4281			 * f2fs_direct_IO().
4282			 */
4283			err = f2fs_convert_inline_inode(inode);
4284			if (err)
4285				goto out_err;
4286			/*
4287			 * If f2fs_force_buffered_io() is true, we have to allocate
4288			 * blocks all the time, since f2fs_direct_IO will fall
4289			 * back to buffered IO.
4290			 */
4291			if (!f2fs_force_buffered_io(inode, iocb, from) &&
4292					allow_outplace_dio(inode, iocb, from))
4293				goto write;
4294		}
4295		preallocated = true;
4296		target_size = iocb->ki_pos + iov_iter_count(from);
4297
4298		err = f2fs_preallocate_blocks(iocb, from);
4299		if (err) {
4300out_err:
4301			clear_inode_flag(inode, FI_NO_PREALLOC);
4302			inode_unlock(inode);
4303			ret = err;
4304			goto out;
4305		}
4306write:
4307		ret = __generic_file_write_iter(iocb, from);
4308		clear_inode_flag(inode, FI_NO_PREALLOC);
4309
4310		/* if we couldn't write data, we should deallocate blocks. */
4311		if (preallocated && i_size_read(inode) < target_size) {
4312			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4313			down_write(&F2FS_I(inode)->i_mmap_sem);
4314			f2fs_truncate(inode);
4315			up_write(&F2FS_I(inode)->i_mmap_sem);
4316			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4317		}
4318
4319		if (ret > 0)
4320			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4321	}
4322unlock:
4323	inode_unlock(inode);
4324out:
4325	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4326					iov_iter_count(from), ret);
4327	if (ret > 0)
4328		ret = generic_write_sync(iocb, ret);
4329	return ret;
4330}
4331
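/*
 * 32-bit compat support: the GC range and move range ioctls carry 64-bit
 * fields, so their argument structures are redefined with compat types and
 * copied in field by field before calling the common helpers.
 */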
4332#ifdef CONFIG_COMPAT
4333struct compat_f2fs_gc_range {
4334	u32 sync;
4335	compat_u64 start;
4336	compat_u64 len;
4337};
4338#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
4339						struct compat_f2fs_gc_range)
4340
4341static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4342{
4343	struct compat_f2fs_gc_range __user *urange;
4344	struct f2fs_gc_range range;
4345	int err;
4346
4347	urange = compat_ptr(arg);
4348	err = get_user(range.sync, &urange->sync);
4349	err |= get_user(range.start, &urange->start);
4350	err |= get_user(range.len, &urange->len);
4351	if (err)
4352		return -EFAULT;
4353
4354	return __f2fs_ioc_gc_range(file, &range);
4355}
4356
4357struct compat_f2fs_move_range {
4358	u32 dst_fd;
4359	compat_u64 pos_in;
4360	compat_u64 pos_out;
4361	compat_u64 len;
4362};
4363#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
4364					struct compat_f2fs_move_range)
4365
4366static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4367{
4368	struct compat_f2fs_move_range __user *urange;
4369	struct f2fs_move_range range;
4370	int err;
4371
4372	urange = compat_ptr(arg);
4373	err = get_user(range.dst_fd, &urange->dst_fd);
4374	err |= get_user(range.pos_in, &urange->pos_in);
4375	err |= get_user(range.pos_out, &urange->pos_out);
4376	err |= get_user(range.len, &urange->len);
4377	if (err)
4378		return -EFAULT;
4379
4380	return __f2fs_ioc_move_range(file, &range);
4381}
4382
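/*
 * Compat ioctl entry point: translate the commands whose layout differs,
 * pass the already-compatible ones straight through, and return
 * -ENOIOCTLCMD for everything else.
 */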
4383long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4384{
4385	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4386		return -EIO;
4387	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4388		return -ENOSPC;
4389
4390	switch (cmd) {
4391	case FS_IOC32_GETVERSION:
4392		cmd = FS_IOC_GETVERSION;
4393		break;
4394	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4395		return f2fs_compat_ioc_gc_range(file, arg);
4396	case F2FS_IOC32_MOVE_RANGE:
4397		return f2fs_compat_ioc_move_range(file, arg);
4398	case F2FS_IOC_START_ATOMIC_WRITE:
4399	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4400	case F2FS_IOC_START_VOLATILE_WRITE:
4401	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4402	case F2FS_IOC_ABORT_VOLATILE_WRITE:
4403	case F2FS_IOC_SHUTDOWN:
4404	case FITRIM:
4405	case FS_IOC_SET_ENCRYPTION_POLICY:
4406	case FS_IOC_GET_ENCRYPTION_PWSALT:
4407	case FS_IOC_GET_ENCRYPTION_POLICY:
4408	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4409	case FS_IOC_ADD_ENCRYPTION_KEY:
4410	case FS_IOC_REMOVE_ENCRYPTION_KEY:
4411	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4412	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4413	case FS_IOC_GET_ENCRYPTION_NONCE:
4414	case F2FS_IOC_GARBAGE_COLLECT:
4415	case F2FS_IOC_WRITE_CHECKPOINT:
4416	case F2FS_IOC_DEFRAGMENT:
4417	case F2FS_IOC_FLUSH_DEVICE:
4418	case F2FS_IOC_GET_FEATURES:
4419	case F2FS_IOC_GET_PIN_FILE:
4420	case F2FS_IOC_SET_PIN_FILE:
4421	case F2FS_IOC_PRECACHE_EXTENTS:
4422	case F2FS_IOC_RESIZE_FS:
4423	case FS_IOC_ENABLE_VERITY:
4424	case FS_IOC_MEASURE_VERITY:
4425	case FS_IOC_READ_VERITY_METADATA:
4426	case FS_IOC_GETFSLABEL:
4427	case FS_IOC_SETFSLABEL:
4428	case F2FS_IOC_GET_COMPRESS_BLOCKS:
4429	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4430	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4431	case F2FS_IOC_SEC_TRIM_FILE:
4432	case F2FS_IOC_GET_COMPRESS_OPTION:
4433	case F2FS_IOC_SET_COMPRESS_OPTION:
4434	case F2FS_IOC_DECOMPRESS_FILE:
4435	case F2FS_IOC_COMPRESS_FILE:
4436		break;
4437	default:
4438		return -ENOIOCTLCMD;
4439	}
4440	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4441}
4442#endif
4443
4444const struct file_operations f2fs_file_operations = {
4445	.llseek		= f2fs_llseek,
4446	.read_iter	= f2fs_file_read_iter,
4447	.write_iter	= f2fs_file_write_iter,
4448	.open		= f2fs_file_open,
4449	.release	= f2fs_release_file,
4450	.mmap		= f2fs_file_mmap,
4451	.flush		= f2fs_file_flush,
4452	.fsync		= f2fs_sync_file,
4453	.fallocate	= f2fs_fallocate,
4454	.unlocked_ioctl	= f2fs_ioctl,
4455#ifdef CONFIG_COMPAT
4456	.compat_ioctl	= f2fs_compat_ioctl,
4457#endif
4458	.splice_read	= generic_file_splice_read,
4459	.splice_write	= iter_file_splice_write,
4460};
v4.6 (an older revision of the same file, fs/f2fs/file.c, follows below)

   1/*
   2 * fs/f2fs/file.c
   3 *
   4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
   5 *             http://www.samsung.com/
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11#include <linux/fs.h>
  12#include <linux/f2fs_fs.h>
  13#include <linux/stat.h>
  14#include <linux/buffer_head.h>
  15#include <linux/writeback.h>
  16#include <linux/blkdev.h>
  17#include <linux/falloc.h>
  18#include <linux/types.h>
  19#include <linux/compat.h>
  20#include <linux/uaccess.h>
  21#include <linux/mount.h>
  22#include <linux/pagevec.h>
  23#include <linux/random.h>
 
 
 
 
 
  24
  25#include "f2fs.h"
  26#include "node.h"
  27#include "segment.h"
  28#include "xattr.h"
  29#include "acl.h"
  30#include "gc.h"
  31#include "trace.h"
  32	#include <trace/events/f2fs.h>
  33
  34static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
  35						struct vm_fault *vmf)
 
 
 
 
  36{
  37	struct page *page = vmf->page;
  38	struct inode *inode = file_inode(vma->vm_file);
  39	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  40	struct dnode_of_data dn;
  41	int err;
 
 
 
 
 
 
 
 
 
 
 
 
  42
  43	sb_start_pagefault(inode->i_sb);
 
 
 
  44
  45	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
 
 
  46
  47	/* block allocation */
  48	f2fs_lock_op(sbi);
  49	set_new_dnode(&dn, inode, NULL, NULL, 0);
  50	err = f2fs_reserve_block(&dn, page->index);
  51	if (err) {
  52		f2fs_unlock_op(sbi);
  53		goto out;
 
 
 
  54	}
  55	f2fs_put_dnode(&dn);
  56	f2fs_unlock_op(sbi);
 
 
 
 
  57
  58	f2fs_balance_fs(sbi, dn.node_changed);
  59
  60	file_update_time(vma->vm_file);
 
  61	lock_page(page);
  62	if (unlikely(page->mapping != inode->i_mapping ||
  63			page_offset(page) > i_size_read(inode) ||
  64			!PageUptodate(page))) {
  65		unlock_page(page);
  66		err = -EFAULT;
  67		goto out;
  68	}
  69
 
 
 
 
 
  70	/*
  71	 * check to see if the page is mapped already (no holes)
  72	 */
  73	if (PageMappedToDisk(page))
  74		goto mapped;
  75
  76	/* page is wholly or partially inside EOF */
  77	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
  78						i_size_read(inode)) {
  79		unsigned offset;
 
  80		offset = i_size_read(inode) & ~PAGE_MASK;
  81		zero_user_segment(page, offset, PAGE_SIZE);
  82	}
  83	set_page_dirty(page);
  84	SetPageUptodate(page);
 
 
 
 
  85
  86	trace_f2fs_vm_page_mkwrite(page, DATA);
  87mapped:
  88	/* fill the page */
  89	f2fs_wait_on_page_writeback(page, DATA, false);
  90
  91	/* wait for GCed encrypted page writeback */
  92	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
  93		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
  94
  95	/* if a GCed page is attached, don't write to a cold segment */
  96	clear_cold_data(page);
  97out:
  98	sb_end_pagefault(inode->i_sb);
  99	f2fs_update_time(sbi, REQ_TIME);
 100	return block_page_mkwrite_return(err);
 101}
 102
 103static const struct vm_operations_struct f2fs_file_vm_ops = {
 104	.fault		= filemap_fault,
 105	.map_pages	= filemap_map_pages,
 106	.page_mkwrite	= f2fs_vm_page_mkwrite,
 107};
 108
 109static int get_parent_ino(struct inode *inode, nid_t *pino)
 110{
 111	struct dentry *dentry;
 112
 113	inode = igrab(inode);
 114	dentry = d_find_any_alias(inode);
 115	iput(inode);
 
 
 116	if (!dentry)
 117		return 0;
 118
 119	if (update_dent_inode(inode, inode, &dentry->d_name)) {
 120		dput(dentry);
 121		return 0;
 122	}
 123
 124	*pino = parent_ino(dentry);
 125	dput(dentry);
 126	return 1;
 127}
 128
 129static inline bool need_do_checkpoint(struct inode *inode)
 130{
 131	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 132	bool need_cp = false;
 133
 134	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
 135		need_cp = true;
 136	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
 137		need_cp = true;
 
 
 
 
 138	else if (file_wrong_pino(inode))
 139		need_cp = true;
 140	else if (!space_for_roll_forward(sbi))
 141		need_cp = true;
 142	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
 143		need_cp = true;
 144	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
 145		need_cp = true;
 146	else if (test_opt(sbi, FASTBOOT))
 147		need_cp = true;
 148	else if (sbi->active_logs == 2)
 149		need_cp = true;
 
 
 
 
 
 150
 151	return need_cp;
 152}
 153
 154static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
 155{
 156	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
 157	bool ret = false;
 158	/* but we still need to check for pending inode updates */
 159	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
 160		ret = true;
 161	f2fs_put_page(i, 0);
 162	return ret;
 163}
 164
 165static void try_to_fix_pino(struct inode *inode)
 166{
 167	struct f2fs_inode_info *fi = F2FS_I(inode);
 168	nid_t pino;
 169
 170	down_write(&fi->i_sem);
 171	fi->xattr_ver = 0;
 172	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
 173			get_parent_ino(inode, &pino)) {
 174		fi->i_pino = pino;
 175		file_got_pino(inode);
 176		up_write(&fi->i_sem);
 177
 178		mark_inode_dirty_sync(inode);
 179		f2fs_write_inode(inode, NULL);
 180	} else {
 181		up_write(&fi->i_sem);
 182	}
 
 183}
 184
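/*
 * fsync/fdatasync: flush the data pages first, then either run a full
 * checkpoint (when the inode cannot be recovered by roll-forward) or write
 * the inode's node pages and record APPEND/UPDATE markers so the file can
 * be recovered after a sudden power cut, finishing with a cache flush.
 */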
 185int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
 186{
 187	struct inode *inode = file->f_mapping->host;
 188	struct f2fs_inode_info *fi = F2FS_I(inode);
 189	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 190	nid_t ino = inode->i_ino;
 191	int ret = 0;
 192	bool need_cp = false;
 193	struct writeback_control wbc = {
 194		.sync_mode = WB_SYNC_ALL,
 195		.nr_to_write = LONG_MAX,
 196		.for_reclaim = 0,
 197	};
 
 198
 199	if (unlikely(f2fs_readonly(inode->i_sb)))
 200		return 0;
 201
 202	trace_f2fs_sync_file_enter(inode);
 203
 
 
 
 204	/* if fdatasync is triggered, let's do in-place-update */
 205	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
 206		set_inode_flag(fi, FI_NEED_IPU);
 207	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
 208	clear_inode_flag(fi, FI_NEED_IPU);
 209
 210	if (ret) {
 211		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 212		return ret;
 213	}
 214
 215	/* if the inode is dirty, let's recover all the time */
 216	if (!datasync) {
 217		f2fs_write_inode(inode, NULL);
 218		goto go_write;
 219	}
 220
 221	/*
 222	 * if there is no written data, don't waste time writing recovery info.
 223	 */
 224	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
 225			!exist_written_data(sbi, ino, APPEND_INO)) {
 226
 227		/* it may call write_inode just prior to fsync */
 228		if (need_inode_page_update(sbi, ino))
 229			goto go_write;
 230
 231		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
 232				exist_written_data(sbi, ino, UPDATE_INO))
 233			goto flush_out;
 234		goto out;
 235	}
 236go_write:
 237	/*
 238	 * Both of fdatasync() and fsync() are able to be recovered from
 239	 * sudden-power-off.
 240	 */
 241	down_read(&fi->i_sem);
 242	need_cp = need_do_checkpoint(inode);
 243	up_read(&fi->i_sem);
 244
 245	if (need_cp) {
 246		/* all the dirty node pages should be flushed for POR */
 247		ret = f2fs_sync_fs(inode->i_sb, 1);
 248
 249		/*
 250		 * We've secured consistency through sync_fs. Following pino
 251		 * will be used only for fsynced inodes after checkpoint.
 252		 */
 253		try_to_fix_pino(inode);
 254		clear_inode_flag(fi, FI_APPEND_WRITE);
 255		clear_inode_flag(fi, FI_UPDATE_WRITE);
 256		goto out;
 257	}
 258sync_nodes:
 259	sync_node_pages(sbi, ino, &wbc);
 
 
 
 
 260
 261	/* if cp_error was enabled, we should avoid infinite loop */
 262	if (unlikely(f2fs_cp_error(sbi))) {
 263		ret = -EIO;
 264		goto out;
 265	}
 266
 267	if (need_inode_block_update(sbi, ino)) {
 268		mark_inode_dirty_sync(inode);
 269		f2fs_write_inode(inode, NULL);
 270		goto sync_nodes;
 271	}
 272
 273	ret = wait_on_node_pages_writeback(sbi, ino);
 274	if (ret)
 275		goto out;
 
 
 
 
 
 
 
 
 
 
 276
 277	/* once recovery info is written, don't need to track this */
 278	remove_ino_entry(sbi, ino, APPEND_INO);
 279	clear_inode_flag(fi, FI_APPEND_WRITE);
 280flush_out:
 281	remove_ino_entry(sbi, ino, UPDATE_INO);
 282	clear_inode_flag(fi, FI_UPDATE_WRITE);
 283	ret = f2fs_issue_flush(sbi);
 
 
 
 
 284	f2fs_update_time(sbi, REQ_TIME);
 285out:
 286	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 287	f2fs_trace_ios(NULL, 1);
 288	return ret;
 289}
 290
 291static pgoff_t __get_first_dirty_index(struct address_space *mapping,
 292						pgoff_t pgofs, int whence)
 293{
 294	struct pagevec pvec;
 295	int nr_pages;
 296
 297	if (whence != SEEK_DATA)
 298		return 0;
 299
 300	/* find first dirty page index */
 301	pagevec_init(&pvec, 0);
 302	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
 303					PAGECACHE_TAG_DIRTY, 1);
 304	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
 305	pagevec_release(&pvec);
 306	return pgofs;
 307}
 308
 309static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
 310							int whence)
 311{
 312	switch (whence) {
 313	case SEEK_DATA:
 314		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
 315			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
 
 
 316			return true;
 317		break;
 318	case SEEK_HOLE:
 319		if (blkaddr == NULL_ADDR)
 320			return true;
 321		break;
 322	}
 323	return false;
 324}
 325
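/*
 * SEEK_DATA/SEEK_HOLE: scan the block addresses dnode by dnode from the
 * requested offset until one matches (a dirty or allocated block for
 * SEEK_DATA, an unallocated one for SEEK_HOLE).
 */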
 326static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 327{
 328	struct inode *inode = file->f_mapping->host;
 329	loff_t maxbytes = inode->i_sb->s_maxbytes;
 330	struct dnode_of_data dn;
 331	pgoff_t pgofs, end_offset, dirty;
 332	loff_t data_ofs = offset;
 333	loff_t isize;
 334	int err = 0;
 335
 336	inode_lock(inode);
 337
 338	isize = i_size_read(inode);
 339	if (offset >= isize)
 340		goto fail;
 341
 342	/* handle inline data case */
 343	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
 344		if (whence == SEEK_HOLE)
 345			data_ofs = isize;
 346		goto found;
 
 
 
 
 347	}
 348
 349	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 350
 351	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 352
 353	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 354		set_new_dnode(&dn, inode, NULL, NULL, 0);
 355		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
 356		if (err && err != -ENOENT) {
 357			goto fail;
 358		} else if (err == -ENOENT) {
 359			/* direct node does not exist */
 360			if (whence == SEEK_DATA) {
 361				pgofs = get_next_page_offset(&dn, pgofs);
 362				continue;
 363			} else {
 364				goto found;
 365			}
 366		}
 367
 368		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 369
 370		/* find data/hole in dnode block */
 371		for (; dn.ofs_in_node < end_offset;
 372				dn.ofs_in_node++, pgofs++,
 373				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 374			block_t blkaddr;
 375			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 376
 377			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
 
 
 
 
 
 
 
 
 
 
 378				f2fs_put_dnode(&dn);
 379				goto found;
 380			}
 381		}
 382		f2fs_put_dnode(&dn);
 383	}
 384
 385	if (whence == SEEK_DATA)
 386		goto fail;
 387found:
 388	if (whence == SEEK_HOLE && data_ofs > isize)
 389		data_ofs = isize;
 390	inode_unlock(inode);
 391	return vfs_setpos(file, data_ofs, maxbytes);
 392fail:
 393	inode_unlock(inode);
 394	return -ENXIO;
 395}
 396
 397static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
 398{
 399	struct inode *inode = file->f_mapping->host;
 400	loff_t maxbytes = inode->i_sb->s_maxbytes;
 401
 
 
 
 402	switch (whence) {
 403	case SEEK_SET:
 404	case SEEK_CUR:
 405	case SEEK_END:
 406		return generic_file_llseek_size(file, offset, whence,
 407						maxbytes, i_size_read(inode));
 408	case SEEK_DATA:
 409	case SEEK_HOLE:
 410		if (offset < 0)
 411			return -ENXIO;
 412		return f2fs_seek_block(file, offset, whence);
 413	}
 414
 415	return -EINVAL;
 416}
 417
 418static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 419{
 420	struct inode *inode = file_inode(file);
 421	int err;
 422
 423	if (f2fs_encrypted_inode(inode)) {
 424		err = fscrypt_get_encryption_info(inode);
 425		if (err)
 426			return 0;
 427		if (!f2fs_encrypted_inode(inode))
 428			return -ENOKEY;
 429	}
 430
 431	/* we don't need to use inline_data strictly */
 432	err = f2fs_convert_inline_inode(inode);
 433	if (err)
 434		return err;
 435
 436	file_accessed(file);
 437	vma->vm_ops = &f2fs_file_vm_ops;
 
 438	return 0;
 439}
 440
 441static int f2fs_file_open(struct inode *inode, struct file *filp)
 442{
 443	int ret = generic_file_open(inode, filp);
 444	struct dentry *dir;
 
 
 
 
 
 
 
 
 
 
 
 445
 446	if (!ret && f2fs_encrypted_inode(inode)) {
 447		ret = fscrypt_get_encryption_info(inode);
 448		if (ret)
 449			return -EACCES;
 450		if (!fscrypt_has_encryption_key(inode))
 451			return -ENOKEY;
 452	}
 453	dir = dget_parent(file_dentry(filp));
 454	if (f2fs_encrypted_inode(d_inode(dir)) &&
 455			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
 456		dput(dir);
 457		return -EPERM;
 458	}
 459	dput(dir);
 460	return ret;
 461}
 462
 463int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 464{
 465	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 466	struct f2fs_node *raw_node;
 467	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
 468	__le32 *addr;
 
 
 
 
 
 
 
 
 469
 470	raw_node = F2FS_NODE(dn->node_page);
 471	addr = blkaddr_in_node(raw_node) + ofs;
 472
 473	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
 
 474		block_t blkaddr = le32_to_cpu(*addr);
 
 
 
 
 
 
 
 
 
 
 475		if (blkaddr == NULL_ADDR)
 476			continue;
 477
 478		dn->data_blkaddr = NULL_ADDR;
 479		set_data_blkaddr(dn);
 480		invalidate_blocks(sbi, blkaddr);
 
 
 
 
 
 
 
 
 481		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 482			clear_inode_flag(F2FS_I(dn->inode),
 483						FI_FIRST_BLOCK_WRITTEN);
 484		nr_free++;
 
 
 
 485	}
 486
 
 
 
 487	if (nr_free) {
 488		pgoff_t fofs;
 489		/*
 490		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
 491		 * we will invalidate all blkaddr in the whole range.
 492		 */
 493		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
 494							dn->inode) + ofs;
 495		f2fs_update_extent_cache_range(dn, fofs, 0, len);
 496		dec_valid_block_count(sbi, dn->inode, nr_free);
 497		sync_inode_page(dn);
 498	}
 499	dn->ofs_in_node = ofs;
 500
 501	f2fs_update_time(sbi, REQ_TIME);
 502	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
 503					 dn->ofs_in_node, nr_free);
 504	return nr_free;
 505}
 506
 507void truncate_data_blocks(struct dnode_of_data *dn)
 508{
 509	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
 510}
 511
 512static int truncate_partial_data_page(struct inode *inode, u64 from,
 513								bool cache_only)
 514{
 515	unsigned offset = from & (PAGE_SIZE - 1);
 516	pgoff_t index = from >> PAGE_SHIFT;
 517	struct address_space *mapping = inode->i_mapping;
 518	struct page *page;
 519
 520	if (!offset && !cache_only)
 521		return 0;
 522
 523	if (cache_only) {
 524		page = f2fs_grab_cache_page(mapping, index, false);
 525		if (page && PageUptodate(page))
 526			goto truncate_out;
 527		f2fs_put_page(page, 1);
 528		return 0;
 529	}
 530
 531	page = get_lock_data_page(inode, index, true);
 532	if (IS_ERR(page))
 533		return 0;
 534truncate_out:
 535	f2fs_wait_on_page_writeback(page, DATA, true);
 536	zero_user(page, offset, PAGE_SIZE - offset);
 537	if (!cache_only || !f2fs_encrypted_inode(inode) ||
 538					!S_ISREG(inode->i_mode))
 
 
 539		set_page_dirty(page);
 540	f2fs_put_page(page, 1);
 541	return 0;
 542}
 543
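/*
 * Free every block from byte offset "from" onwards: truncate inline data in
 * place, drop the tail of the direct node that contains "from", let
 * truncate_inode_blocks() free the remaining nodes, then zero out the
 * partial last page.
 */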
 544int truncate_blocks(struct inode *inode, u64 from, bool lock)
 545{
 546	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 547	unsigned int blocksize = inode->i_sb->s_blocksize;
 548	struct dnode_of_data dn;
 549	pgoff_t free_from;
 550	int count = 0, err = 0;
 551	struct page *ipage;
 552	bool truncate_page = false;
 553
 554	trace_f2fs_truncate_blocks_enter(inode, from);
 555
 556	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
 
 
 
 557
 558	if (lock)
 559		f2fs_lock_op(sbi);
 560
 561	ipage = get_node_page(sbi, inode->i_ino);
 562	if (IS_ERR(ipage)) {
 563		err = PTR_ERR(ipage);
 564		goto out;
 565	}
 566
 567	if (f2fs_has_inline_data(inode)) {
 568		if (truncate_inline_inode(ipage, from))
 569			set_page_dirty(ipage);
 570		f2fs_put_page(ipage, 1);
 571		truncate_page = true;
 572		goto out;
 573	}
 574
 575	set_new_dnode(&dn, inode, ipage, NULL, 0);
 576	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
 577	if (err) {
 578		if (err == -ENOENT)
 579			goto free_next;
 580		goto out;
 581	}
 582
 583	count = ADDRS_PER_PAGE(dn.node_page, inode);
 584
 585	count -= dn.ofs_in_node;
 586	f2fs_bug_on(sbi, count < 0);
 587
 588	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
 589		truncate_data_blocks_range(&dn, count);
 590		free_from += count;
 591	}
 592
 593	f2fs_put_dnode(&dn);
 594free_next:
 595	err = truncate_inode_blocks(inode, free_from);
 596out:
 597	if (lock)
 598		f2fs_unlock_op(sbi);
 599
 600	/* lastly zero out the first data page */
 601	if (!err)
 602		err = truncate_partial_data_page(inode, from, truncate_page);
 603
 604	trace_f2fs_truncate_blocks_exit(inode, err);
 605	return err;
 606}
 607
 608int f2fs_truncate(struct inode *inode, bool lock)
 609{
 
 610	int err;
 611
 612	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 613				S_ISLNK(inode->i_mode)))
 614		return 0;
 615
 616	trace_f2fs_truncate(inode);
 617
 
 
 
 
 
 
 
 
 
 618	/* we should check inline_data size */
 619	if (!f2fs_may_inline_data(inode)) {
 620		err = f2fs_convert_inline_inode(inode);
 621		if (err)
 622			return err;
 623	}
 624
 625	err = truncate_blocks(inode, i_size_read(inode), lock);
 626	if (err)
 627		return err;
 628
 629	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 630	mark_inode_dirty(inode);
 631	return 0;
 632}
 633
 634int f2fs_getattr(struct vfsmount *mnt,
 635			 struct dentry *dentry, struct kstat *stat)
 636{
 637	struct inode *inode = d_inode(dentry);
 638	generic_fillattr(inode, stat);
 639	stat->blocks <<= 3;
 640	return 0;
 641}
 642
 643#ifdef CONFIG_F2FS_FS_POSIX_ACL
 644static void __setattr_copy(struct inode *inode, const struct iattr *attr)
 
 645{
 646	struct f2fs_inode_info *fi = F2FS_I(inode);
 647	unsigned int ia_valid = attr->ia_valid;
 648
 649	if (ia_valid & ATTR_UID)
 650		inode->i_uid = attr->ia_uid;
 651	if (ia_valid & ATTR_GID)
 652		inode->i_gid = attr->ia_gid;
 653	if (ia_valid & ATTR_ATIME)
 654		inode->i_atime = timespec_trunc(attr->ia_atime,
 655						inode->i_sb->s_time_gran);
 656	if (ia_valid & ATTR_MTIME)
 657		inode->i_mtime = timespec_trunc(attr->ia_mtime,
 658						inode->i_sb->s_time_gran);
 659	if (ia_valid & ATTR_CTIME)
 660		inode->i_ctime = timespec_trunc(attr->ia_ctime,
 661						inode->i_sb->s_time_gran);
 662	if (ia_valid & ATTR_MODE) {
 663		umode_t mode = attr->ia_mode;
 
 664
 665		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
 666			mode &= ~S_ISGID;
 667		set_acl_inode(fi, mode);
 668	}
 669}
 670#else
 671#define __setattr_copy setattr_copy
 672#endif
 673
 674int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 
 675{
 676	struct inode *inode = d_inode(dentry);
 677	struct f2fs_inode_info *fi = F2FS_I(inode);
 678	int err;
 679
 680	err = inode_change_ok(inode, attr);
 681	if (err)
 682		return err;
 683
 684	if (attr->ia_valid & ATTR_SIZE) {
 685		if (f2fs_encrypted_inode(inode) &&
 686				fscrypt_get_encryption_info(inode))
 687			return -EACCES;
 688
 689		if (attr->ia_size <= i_size_read(inode)) {
 690			truncate_setsize(inode, attr->ia_size);
 691			err = f2fs_truncate(inode, true);
 
 692			if (err)
 693				return err;
 694			f2fs_balance_fs(F2FS_I_SB(inode), true);
 695		} else {
 696			/*
 697			 * do not trim all blocks after i_size if target size is
 698			 * larger than i_size.
 699			 */
 700			truncate_setsize(inode, attr->ia_size);
 
 
 
 
 
 
 
 
 
 
 701
 702			/* should convert inline inode here */
 703			if (!f2fs_may_inline_data(inode)) {
 704				err = f2fs_convert_inline_inode(inode);
 705				if (err)
 706					return err;
 707			}
 708			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 709		}
 710	}
 711
 712	__setattr_copy(inode, attr);
 713
 714	if (attr->ia_valid & ATTR_MODE) {
 715		err = posix_acl_chmod(inode, get_inode_mode(inode));
 716		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
 717			inode->i_mode = fi->i_acl_mode;
 718			clear_inode_flag(fi, FI_ACL_MODE);
 
 
 719		}
 720	}
 721
 722	mark_inode_dirty(inode);
 
 
 
 
 
 723	return err;
 724}
 725
 726const struct inode_operations f2fs_file_inode_operations = {
 727	.getattr	= f2fs_getattr,
 728	.setattr	= f2fs_setattr,
 729	.get_acl	= f2fs_get_acl,
 730	.set_acl	= f2fs_set_acl,
 731#ifdef CONFIG_F2FS_FS_XATTR
 732	.setxattr	= generic_setxattr,
 733	.getxattr	= generic_getxattr,
 734	.listxattr	= f2fs_listxattr,
 735	.removexattr	= generic_removexattr,
 736#endif
 737	.fiemap		= f2fs_fiemap,
 
 
 738};
 739
 740static int fill_zero(struct inode *inode, pgoff_t index,
 741					loff_t start, loff_t len)
 742{
 743	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 744	struct page *page;
 745
 746	if (!len)
 747		return 0;
 748
 749	f2fs_balance_fs(sbi, true);
 750
 751	f2fs_lock_op(sbi);
 752	page = get_new_data_page(inode, NULL, index, false);
 753	f2fs_unlock_op(sbi);
 754
 755	if (IS_ERR(page))
 756		return PTR_ERR(page);
 757
 758	f2fs_wait_on_page_writeback(page, DATA, true);
 759	zero_user(page, start, len);
 760	set_page_dirty(page);
 761	f2fs_put_page(page, 1);
 762	return 0;
 763}
 764
 765int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 766{
 767	int err;
 768
 769	while (pg_start < pg_end) {
 770		struct dnode_of_data dn;
 771		pgoff_t end_offset, count;
 772
 773		set_new_dnode(&dn, inode, NULL, NULL, 0);
 774		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
 775		if (err) {
 776			if (err == -ENOENT) {
 777				pg_start++;
 
 778				continue;
 779			}
 780			return err;
 781		}
 782
 783		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 784		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
 785
 786		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
 787
 788		truncate_data_blocks_range(&dn, count);
 789		f2fs_put_dnode(&dn);
 790
 791		pg_start += count;
 792	}
 793	return 0;
 794}
 795
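/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at either end of the range
 * and drop the page cache and on-disk blocks for the whole pages in between.
 */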
 796static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 797{
 798	pgoff_t pg_start, pg_end;
 799	loff_t off_start, off_end;
 800	int ret;
 801
 802	ret = f2fs_convert_inline_inode(inode);
 803	if (ret)
 804		return ret;
 805
 806	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
 807	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 808
 809	off_start = offset & (PAGE_SIZE - 1);
 810	off_end = (offset + len) & (PAGE_SIZE - 1);
 811
 812	if (pg_start == pg_end) {
 813		ret = fill_zero(inode, pg_start, off_start,
 814						off_end - off_start);
 815		if (ret)
 816			return ret;
 817	} else {
 818		if (off_start) {
 819			ret = fill_zero(inode, pg_start++, off_start,
 820						PAGE_SIZE - off_start);
 821			if (ret)
 822				return ret;
 823		}
 824		if (off_end) {
 825			ret = fill_zero(inode, pg_end, 0, off_end);
 826			if (ret)
 827				return ret;
 828		}
 829
 830		if (pg_start < pg_end) {
 831			struct address_space *mapping = inode->i_mapping;
 832			loff_t blk_start, blk_end;
 833			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 834
 835			f2fs_balance_fs(sbi, true);
 836
 837			blk_start = (loff_t)pg_start << PAGE_SHIFT;
 838			blk_end = (loff_t)pg_end << PAGE_SHIFT;
 839			truncate_inode_pages_range(mapping, blk_start,
 840					blk_end - 1);
 
 
 
 841
 842			f2fs_lock_op(sbi);
 843			ret = truncate_hole(inode, pg_start, pg_end);
 844			f2fs_unlock_op(sbi);
 
 
 
 845		}
 846	}
 847
 848	return ret;
 849}
 850
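/*
 * Move one block from index src to index dst: a block that is not yet
 * checkpointed is re-linked to the destination dnode, while a checkpointed
 * block is copied through the page cache; the source then becomes a hole.
 */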
 851static int __exchange_data_block(struct inode *inode, pgoff_t src,
 852					pgoff_t dst, bool full)
 853{
 854	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 855	struct dnode_of_data dn;
 856	block_t new_addr;
 857	bool do_replace = false;
 858	int ret;
 859
 
 860	set_new_dnode(&dn, inode, NULL, NULL, 0);
 861	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
 862	if (ret && ret != -ENOENT) {
 863		return ret;
 864	} else if (ret == -ENOENT) {
 865		new_addr = NULL_ADDR;
 866	} else {
 867		new_addr = dn.data_blkaddr;
 868		if (!is_checkpointed_data(sbi, new_addr)) {
 869			/* do not invalidate this block address */
 870			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
 871			do_replace = true;
 872		}
 873		f2fs_put_dnode(&dn);
 874	}
 
 
 875
 876	if (new_addr == NULL_ADDR)
 877		return full ? truncate_hole(inode, dst, dst + 1) : 0;
 
 
 
 
 
 
 
 
 
 
 
 878
 879	if (do_replace) {
 880		struct page *ipage = get_node_page(sbi, inode->i_ino);
 881		struct node_info ni;
 
 
 882
 883		if (IS_ERR(ipage)) {
 884			ret = PTR_ERR(ipage);
 885			goto err_out;
 886		}
 887
 888		set_new_dnode(&dn, inode, ipage, NULL, 0);
 889		ret = f2fs_reserve_block(&dn, dst);
 890		if (ret)
 891			goto err_out;
 
 892
 893		truncate_data_blocks_range(&dn, 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 894
 895		get_node_info(sbi, dn.nid, &ni);
 896		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
 897				ni.version, true, false);
 898		f2fs_put_dnode(&dn);
 899	} else {
 900		struct page *psrc, *pdst;
 901
 902		psrc = get_lock_data_page(inode, src, true);
 903		if (IS_ERR(psrc))
 904			return PTR_ERR(psrc);
 905		pdst = get_new_data_page(inode, NULL, dst, true);
 906		if (IS_ERR(pdst)) {
 
 
 
 
 
 
 
 
 907			f2fs_put_page(psrc, 1);
 908			return PTR_ERR(pdst);
 
 
 
 
 
 909		}
 910		f2fs_copy_page(psrc, pdst);
 911		set_page_dirty(pdst);
 912		f2fs_put_page(pdst, 1);
 913		f2fs_put_page(psrc, 1);
 914
 915		return truncate_hole(inode, src, src + 1);
 916	}
 917	return 0;
 918
 919err_out:
 920	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
 921		f2fs_update_data_blkaddr(&dn, new_addr);
 922		f2fs_put_dnode(&dn);
 923	}
 924	return ret;
 925}
 926
 927static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
 928{
 929	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 930	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 931	int ret = 0;
 932
 933	for (; end < nrpages; start++, end++) {
 934		f2fs_balance_fs(sbi, true);
 935		f2fs_lock_op(sbi);
 936		ret = __exchange_data_block(inode, end, start, true);
 937		f2fs_unlock_op(sbi);
 938		if (ret)
 939			break;
 940	}
 941	return ret;
 942}
 943
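/*
 * FALLOC_FL_COLLAPSE_RANGE: remove the blocks in the range by shifting every
 * following block towards the start of the file, then shrink i_size by len.
 */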
 944static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 945{
 946	pgoff_t pg_start, pg_end;
 947	loff_t new_size;
 948	int ret;
 949
 950	if (offset + len >= i_size_read(inode))
 951		return -EINVAL;
 952
 953	/* collapse range should be aligned to block size of f2fs. */
 954	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
 955		return -EINVAL;
 956
 957	ret = f2fs_convert_inline_inode(inode);
 958	if (ret)
 959		return ret;
 960
 961	pg_start = offset >> PAGE_SHIFT;
 962	pg_end = (offset + len) >> PAGE_SHIFT;
 963
 964	/* write out all dirty pages from offset */
 965	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 966	if (ret)
 967		return ret;
 968
 969	truncate_pagecache(inode, offset);
 970
 971	ret = f2fs_do_collapse(inode, pg_start, pg_end);
 972	if (ret)
 973		return ret;
 974
 975	/* write out all moved pages, if possible */
 
 976	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 977	truncate_pagecache(inode, offset);
 978
 979	new_size = i_size_read(inode) - len;
 980	truncate_pagecache(inode, new_size);
 981
 982	ret = truncate_blocks(inode, new_size, true);
 983	if (!ret)
 984		i_size_write(inode, new_size);
 985
 986	return ret;
 987}
 988
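/*
 * FALLOC_FL_ZERO_RANGE: zero the partial head and tail pages and turn every
 * whole page in between into a preallocated (NEW_ADDR) block.
 */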
 989static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 990								int mode)
 991{
 992	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 993	struct address_space *mapping = inode->i_mapping;
 994	pgoff_t index, pg_start, pg_end;
 995	loff_t new_size = i_size_read(inode);
 996	loff_t off_start, off_end;
 997	int ret = 0;
 998
 999	ret = inode_newsize_ok(inode, (len + offset));
1000	if (ret)
1001		return ret;
1002
1003	ret = f2fs_convert_inline_inode(inode);
1004	if (ret)
1005		return ret;
1006
1007	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1008	if (ret)
1009		return ret;
1010
1011	truncate_pagecache_range(inode, offset, offset + len - 1);
1012
1013	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1014	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1015
1016	off_start = offset & (PAGE_SIZE - 1);
1017	off_end = (offset + len) & (PAGE_SIZE - 1);
1018
1019	if (pg_start == pg_end) {
1020		ret = fill_zero(inode, pg_start, off_start,
1021						off_end - off_start);
1022		if (ret)
1023			return ret;
1024
1025		if (offset + len > new_size)
1026			new_size = offset + len;
1027		new_size = max_t(loff_t, new_size, offset + len);
1028	} else {
1029		if (off_start) {
1030			ret = fill_zero(inode, pg_start++, off_start,
1031						PAGE_SIZE - off_start);
1032			if (ret)
1033				return ret;
1034
1035			new_size = max_t(loff_t, new_size,
1036					(loff_t)pg_start << PAGE_SHIFT);
1037		}
1038
1039		for (index = pg_start; index < pg_end; index++) {
1040			struct dnode_of_data dn;
1041			struct page *ipage;
 
 
 
 
 
 
 
 
1042
1043			f2fs_lock_op(sbi);
1044
1045			ipage = get_node_page(sbi, inode->i_ino);
1046			if (IS_ERR(ipage)) {
1047				ret = PTR_ERR(ipage);
1048				f2fs_unlock_op(sbi);
 
 
1049				goto out;
1050			}
1051
1052			set_new_dnode(&dn, inode, ipage, NULL, 0);
1053			ret = f2fs_reserve_block(&dn, index);
1054			if (ret) {
1055				f2fs_unlock_op(sbi);
1056				goto out;
1057			}
1058
1059			if (dn.data_blkaddr != NEW_ADDR) {
1060				invalidate_blocks(sbi, dn.data_blkaddr);
1061				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1062			}
1063			f2fs_put_dnode(&dn);
 
1064			f2fs_unlock_op(sbi);
 
 
 
 
1065
 
 
 
 
1066			new_size = max_t(loff_t, new_size,
1067				(loff_t)(index + 1) << PAGE_SHIFT);
1068		}
1069
1070		if (off_end) {
1071			ret = fill_zero(inode, pg_end, 0, off_end);
1072			if (ret)
1073				goto out;
1074
1075			new_size = max_t(loff_t, new_size, offset + len);
1076		}
1077	}
1078
1079out:
1080	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
1081		i_size_write(inode, new_size);
1082		mark_inode_dirty(inode);
1083		update_inode_page(inode);
 
1084	}
1085
1086	return ret;
1087}
1088
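/*
 * FALLOC_FL_INSERT_RANGE: working backwards, shift every block at or after
 * the offset towards the end of the file by len bytes and grow i_size to
 * match.
 */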
1089static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1090{
1091	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1092	pgoff_t pg_start, pg_end, delta, nrpages, idx;
1093	loff_t new_size;
1094	int ret = 0;
1095
1096	new_size = i_size_read(inode) + len;
1097	if (new_size > inode->i_sb->s_maxbytes)
1098		return -EFBIG;
 
1099
1100	if (offset >= i_size_read(inode))
1101		return -EINVAL;
1102
1103	/* insert range should be aligned to block size of f2fs. */
1104	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1105		return -EINVAL;
1106
1107	ret = f2fs_convert_inline_inode(inode);
1108	if (ret)
1109		return ret;
1110
1111	f2fs_balance_fs(sbi, true);
1112
1113	ret = truncate_blocks(inode, i_size_read(inode), true);
 
 
1114	if (ret)
1115		return ret;
1116
1117	/* write out all dirty pages from offset */
1118	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1119	if (ret)
1120		return ret;
1121
1122	truncate_pagecache(inode, offset);
1123
1124	pg_start = offset >> PAGE_SHIFT;
1125	pg_end = (offset + len) >> PAGE_SHIFT;
1126	delta = pg_end - pg_start;
1127	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
 
 
 
 
 
 
 
 
 
 
1128
1129	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
1130		f2fs_lock_op(sbi);
1131		ret = __exchange_data_block(inode, idx, idx + delta, false);
 
 
 
1132		f2fs_unlock_op(sbi);
1133		if (ret)
1134			break;
1135	}
 
 
1136
1137	/* write out all moved pages, if possible */
 
1138	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1139	truncate_pagecache(inode, offset);
 
1140
1141	if (!ret)
1142		i_size_write(inode, new_size);
1143	return ret;
1144}
1145
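/*
 * Plain fallocate(): reserve a block for every page in the range and, unless
 * FALLOC_FL_KEEP_SIZE was asked for, extend i_size to cover it.
 */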
1146static int expand_inode_data(struct inode *inode, loff_t offset,
1147					loff_t len, int mode)
1148{
1149	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1150	pgoff_t index, pg_start, pg_end;
 
 
 
1151	loff_t new_size = i_size_read(inode);
1152	loff_t off_start, off_end;
1153	int ret = 0;
 
1154
1155	ret = inode_newsize_ok(inode, (len + offset));
1156	if (ret)
1157		return ret;
1158
1159	ret = f2fs_convert_inline_inode(inode);
1160	if (ret)
1161		return ret;
1162
1163	f2fs_balance_fs(sbi, true);
1164
1165	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1166	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1167
1168	off_start = offset & (PAGE_SIZE - 1);
1169	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 
 
1170
1171	f2fs_lock_op(sbi);
 
 
 
 
 
 
 
1172
1173	for (index = pg_start; index <= pg_end; index++) {
1174		struct dnode_of_data dn;
1175
1176		if (index == pg_end && !off_end)
1177			goto noalloc;
1178
1179		set_new_dnode(&dn, inode, NULL, NULL, 0);
1180		ret = f2fs_reserve_block(&dn, index);
1181		if (ret)
1182			break;
1183noalloc:
1184		if (pg_start == pg_end)
1185			new_size = offset + len;
1186		else if (index == pg_start && off_start)
1187			new_size = (loff_t)(index + 1) << PAGE_SHIFT;
1188		else if (index == pg_end)
1189			new_size = ((loff_t)index << PAGE_SHIFT) +
1190								off_end;
1191		else
1192			new_size += PAGE_SIZE;
1193	}
1194
1195	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
1196		i_size_read(inode) < new_size) {
1197		i_size_write(inode, new_size);
1198		mark_inode_dirty(inode);
1199		update_inode_page(inode);
1200	}
1201	f2fs_unlock_op(sbi);
1202
1203	return ret;
1204}
1205
1206static long f2fs_fallocate(struct file *file, int mode,
1207				loff_t offset, loff_t len)
1208{
1209	struct inode *inode = file_inode(file);
1210	long ret = 0;
1211
 
 
 
 
 
 
 
1212	/* f2fs only supports ->fallocate for regular files */
1213	if (!S_ISREG(inode->i_mode))
1214		return -EINVAL;
1215
1216	if (f2fs_encrypted_inode(inode) &&
1217		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1218		return -EOPNOTSUPP;
1219
 
 
 
 
 
1220	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1221			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1222			FALLOC_FL_INSERT_RANGE))
1223		return -EOPNOTSUPP;
1224
1225	inode_lock(inode);
1226
1227	if (mode & FALLOC_FL_PUNCH_HOLE) {
1228		if (offset >= inode->i_size)
1229			goto out;
1230
1231		ret = punch_hole(inode, offset, len);
1232	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1233		ret = f2fs_collapse_range(inode, offset, len);
1234	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1235		ret = f2fs_zero_range(inode, offset, len, mode);
1236	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1237		ret = f2fs_insert_range(inode, offset, len);
1238	} else {
1239		ret = expand_inode_data(inode, offset, len, mode);
1240	}
1241
1242	if (!ret) {
1243		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1244		mark_inode_dirty(inode);
1245		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1246	}
1247
1248out:
1249	inode_unlock(inode);
1250
1251	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1252	return ret;
1253}
1254
1255static int f2fs_release_file(struct inode *inode, struct file *filp)
1256{
 
 
 
 
 
 
 
 
1257	/* some remaining atomic pages should be discarded */
1258	if (f2fs_is_atomic_file(inode))
1259		drop_inmem_pages(inode);
1260	if (f2fs_is_volatile_file(inode)) {
1261		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
1262		filemap_fdatawrite(inode->i_mapping);
1263		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
 
 
1264	}
1265	return 0;
1266}
1267
1268#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
1269#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)
 
1270
1271static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
1272{
1273	if (S_ISDIR(mode))
1274		return flags;
1275	else if (S_ISREG(mode))
1276		return flags & F2FS_REG_FLMASK;
1277	else
1278		return flags & F2FS_OTHER_FLMASK;
 
 
1279}
1280
1281static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1282{
1283	struct inode *inode = file_inode(filp);
1284	struct f2fs_inode_info *fi = F2FS_I(inode);
1285	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
1286	return put_user(flags, (int __user *)arg);
1287}
1288
1289static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1290{
1291	struct inode *inode = file_inode(filp);
1292	struct f2fs_inode_info *fi = F2FS_I(inode);
1293	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
1294	unsigned int oldflags;
1295	int ret;
1296
1297	ret = mnt_want_write_file(filp);
1298	if (ret)
1299		return ret;
 
 
 
 
 
 
1300
1301	if (!inode_owner_or_capable(inode)) {
1302		ret = -EACCES;
1303		goto out;
1304	}
 
 
 
 
 
 
 
 
 
 
 
 
1305
1306	if (get_user(flags, (int __user *)arg)) {
1307		ret = -EFAULT;
1308		goto out;
1309	}
1310
1311	flags = f2fs_mask_flags(inode->i_mode, flags);
 
 
 
 
1312
1313	inode_lock(inode);
 
 
1314
1315	oldflags = fi->i_flags;
 
1316
1317	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
1318		if (!capable(CAP_LINUX_IMMUTABLE)) {
1319			inode_unlock(inode);
1320			ret = -EPERM;
1321			goto out;
1322		}
1323	}
1324
1325	flags = flags & FS_FL_USER_MODIFIABLE;
1326	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
1327	fi->i_flags = flags;
1328	inode_unlock(inode);
1329
1330	f2fs_set_inode_flags(inode);
1331	inode->i_ctime = CURRENT_TIME;
1332	mark_inode_dirty(inode);
1333out:
1334	mnt_drop_write_file(filp);
1335	return ret;
1336}
1337
1338static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1339{
1340	struct inode *inode = file_inode(filp);
1341
1342	return put_user(inode->i_generation, (int __user *)arg);
1343}
1344
1345static int f2fs_ioc_start_atomic_write(struct file *filp)
1346{
1347	struct inode *inode = file_inode(filp);
 
 
1348	int ret;
1349
1350	if (!inode_owner_or_capable(inode))
1351		return -EACCES;
1352
1353	if (f2fs_is_atomic_file(inode))
1354		return 0;
1355
1356	ret = f2fs_convert_inline_inode(inode);
1357	if (ret)
1358		return ret;
1359
1360	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1361	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1362
1363	return 0;
 
 
 
 
1364}
1365
1366static int f2fs_ioc_commit_atomic_write(struct file *filp)
1367{
1368	struct inode *inode = file_inode(filp);
1369	int ret;
1370
1371	if (!inode_owner_or_capable(inode))
1372		return -EACCES;
1373
1374	if (f2fs_is_volatile_file(inode))
1375		return 0;
1376
1377	ret = mnt_want_write_file(filp);
1378	if (ret)
1379		return ret;
1380
 
 
 
 
 
 
 
 
 
1381	if (f2fs_is_atomic_file(inode)) {
1382		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1383		ret = commit_inmem_pages(inode);
1384		if (ret) {
1385			set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1386			goto err_out;
1387		}
 
 
 
 
 
1388	}
1389
1390	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
1391err_out:
 
 
 
 
 
1392	mnt_drop_write_file(filp);
1393	return ret;
1394}
1395
1396static int f2fs_ioc_start_volatile_write(struct file *filp)
1397{
1398	struct inode *inode = file_inode(filp);
1399	int ret;
1400
1401	if (!inode_owner_or_capable(inode))
1402		return -EACCES;
1403
 
 
 
 
 
 
 
 
 
1404	if (f2fs_is_volatile_file(inode))
1405		return 0;
1406
1407	ret = f2fs_convert_inline_inode(inode);
1408	if (ret)
1409		return ret;
 
 
 
1410
1411	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
1412	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1413	return 0;
 
 
 
1414}
1415
1416static int f2fs_ioc_release_volatile_write(struct file *filp)
1417{
1418	struct inode *inode = file_inode(filp);
 
1419
1420	if (!inode_owner_or_capable(inode))
1421		return -EACCES;
1422
 
 
 
 
 
 
1423	if (!f2fs_is_volatile_file(inode))
1424		return 0;
1425
1426	if (!f2fs_is_first_block_written(inode))
1427		return truncate_partial_data_page(inode, 0, true);
 
 
1428
1429	return punch_hole(inode, 0, F2FS_BLKSIZE);
 
 
 
 
1430}
1431
1432static int f2fs_ioc_abort_volatile_write(struct file *filp)
1433{
1434	struct inode *inode = file_inode(filp);
1435	int ret;
1436
1437	if (!inode_owner_or_capable(inode))
1438		return -EACCES;
1439
1440	ret = mnt_want_write_file(filp);
1441	if (ret)
1442		return ret;
1443
1444	if (f2fs_is_atomic_file(inode)) {
1445		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1446		drop_inmem_pages(inode);
1447	}
1448	if (f2fs_is_volatile_file(inode)) {
1449		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
1450		ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
 
1451	}
1452
 
 
 
 
1453	mnt_drop_write_file(filp);
1454	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1455	return ret;
1456}
1457
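/*
 * F2FS_IOC_SHUTDOWN: stop the filesystem at the requested level, from a
 * freeze-plus-checkpoint full sync down to an immediate stop with no sync
 * at all.
 */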
1458static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
1459{
1460	struct inode *inode = file_inode(filp);
1461	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1462	struct super_block *sb = sbi->sb;
1463	__u32 in;
 
1464
1465	if (!capable(CAP_SYS_ADMIN))
1466		return -EPERM;
1467
1468	if (get_user(in, (__u32 __user *)arg))
1469		return -EFAULT;
1470
 
 
 
 
 
 
 
 
 
 
 
 
 
1471	switch (in) {
1472	case F2FS_GOING_DOWN_FULLSYNC:
1473		sb = freeze_bdev(sb->s_bdev);
1474		if (sb && !IS_ERR(sb)) {
1475			f2fs_stop_checkpoint(sbi);
1476			thaw_bdev(sb->s_bdev, sb);
1477		}
 
1478		break;
1479	case F2FS_GOING_DOWN_METASYNC:
1480		/* do checkpoint only */
1481		f2fs_sync_fs(sb, 1);
1482		f2fs_stop_checkpoint(sbi);
 
 
 
1483		break;
1484	case F2FS_GOING_DOWN_NOSYNC:
1485		f2fs_stop_checkpoint(sbi);
 
1486		break;
1487	case F2FS_GOING_DOWN_METAFLUSH:
1488		sync_meta_pages(sbi, META, LONG_MAX);
1489		f2fs_stop_checkpoint(sbi);
 
1490		break;
 
 
 
 
 
 
 
1491	default:
1492		return -EINVAL;
 
1493	}
 
 
 
 
 
 
 
1494	f2fs_update_time(sbi, REQ_TIME);
1495	return 0;
 
 
 
 
 
 
1496}
1497
1498static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
1499{
1500	struct inode *inode = file_inode(filp);
1501	struct super_block *sb = inode->i_sb;
1502	struct request_queue *q = bdev_get_queue(sb->s_bdev);
1503	struct fstrim_range range;
1504	int ret;
1505
1506	if (!capable(CAP_SYS_ADMIN))
1507		return -EPERM;
1508
1509	if (!blk_queue_discard(q))
1510		return -EOPNOTSUPP;
1511
1512	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
1513				sizeof(range)))
1514		return -EFAULT;
1515
 
 
 
 
1516	range.minlen = max((unsigned int)range.minlen,
1517				q->limits.discard_granularity);
1518	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
 
1519	if (ret < 0)
1520		return ret;
1521
1522	if (copy_to_user((struct fstrim_range __user *)arg, &range,
1523				sizeof(range)))
1524		return -EFAULT;
1525	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1526	return 0;
1527}
1528
1529static bool uuid_is_nonzero(__u8 u[16])
1530{
1531	int i;
1532
1533	for (i = 0; i < 16; i++)
1534		if (u[i])
1535			return true;
1536	return false;
1537}
1538
1539static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
1540{
1541	struct fscrypt_policy policy;
1542	struct inode *inode = file_inode(filp);
1543
1544	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
1545							sizeof(policy)))
1546		return -EFAULT;
1547
1548	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1549	return fscrypt_process_policy(inode, &policy);
 
1550}
1551
1552static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
1553{
1554	struct fscrypt_policy policy;
1555	struct inode *inode = file_inode(filp);
1556	int err;
1557
1558	err = fscrypt_get_policy(inode, &policy);
1559	if (err)
1560		return err;
1561
1562	if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
1563		return -EFAULT;
1564	return 0;
1565}
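/*
 * Illustrative sketch, not part of this file: both policy ioctls take a
 * struct fscrypt_policy by pointer. The encryption-mode constants and the
 * hypothetical 8-byte key descriptor buffer "key_desc" below are only
 * example values, assuming the fscrypt UAPI definitions of this era.
 *
 *	struct fscrypt_policy policy = {
 *		.version = 0,
 *		.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
 *		.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
 *		.flags = 0,
 *	};
 *
 *	memcpy(policy.master_key_descriptor, key_desc, 8);
 *	if (ioctl(dirfd, F2FS_IOC_SET_ENCRYPTION_POLICY, &policy) < 0)
 *		perror("F2FS_IOC_SET_ENCRYPTION_POLICY");
 */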
1566
1567static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
1568{
1569	struct inode *inode = file_inode(filp);
1570	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1571	int err;
1572
1573	if (!f2fs_sb_has_crypto(inode->i_sb))
1574		return -EOPNOTSUPP;
1575
1576	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
1577		goto got_it;
1578
1579	err = mnt_want_write_file(filp);
1580	if (err)
1581		return err;
1582
1583	/* update superblock with uuid */
1584	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
1585
1586	err = f2fs_commit_super(sbi, false);
1587	if (err) {
1588		/* undo new data */
1589		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
1590		mnt_drop_write_file(filp);
1591		return err;
1592	}
1593	mnt_drop_write_file(filp);
1594got_it:
1595	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
1596									16))
1597		return -EFAULT;
1598	return 0;
1599}
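/*
 * Illustrative sketch, not part of this file: the password-salt ioctl
 * copies 16 raw bytes to the caller, so userspace just supplies a 16-byte
 * buffer (assuming "fd" is open on an encryption-capable f2fs mount):
 *
 *	__u8 salt[16];
 *
 *	if (ioctl(fd, F2FS_IOC_GET_ENCRYPTION_PWSALT, salt) < 0)
 *		perror("F2FS_IOC_GET_ENCRYPTION_PWSALT");
 */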
1600
1601static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
1602{
1603	struct inode *inode = file_inode(filp);
1604	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1605	__u32 sync;
1606
1607	if (!capable(CAP_SYS_ADMIN))
1608		return -EPERM;
1609
1610	if (get_user(sync, (__u32 __user *)arg))
1611		return -EFAULT;
1612
1613	if (f2fs_readonly(sbi->sb))
1614		return -EROFS;
1615
1616	if (!sync) {
1617		if (!mutex_trylock(&sbi->gc_mutex))
1618			return -EBUSY;
1619	} else {
1620		mutex_lock(&sbi->gc_mutex);
1621	}
1622
1623	return f2fs_gc(sbi, sync);
1624}
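/*
 * Illustrative sketch, not part of this file: the argument is a __u32
 * passed by pointer; zero asks for a best-effort pass that returns -EBUSY
 * if GC is already running, while non-zero waits on gc_mutex and forces a
 * synchronous pass.
 *
 *	__u32 sync = 1;
 *
 *	if (ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync) < 0)
 *		perror("F2FS_IOC_GARBAGE_COLLECT");
 */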
1625
1626static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
1627{
1628	struct inode *inode = file_inode(filp);
1629	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1630
1631	if (!capable(CAP_SYS_ADMIN))
1632		return -EPERM;
1633
1634	if (f2fs_readonly(sbi->sb))
1635		return -EROFS;
1636
1637	return f2fs_sync_fs(sbi->sb, 1);
1638}
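/*
 * Illustrative sketch, not part of this file: this ioctl ignores its
 * argument and simply triggers a synchronous f2fs_sync_fs() (i.e. a
 * checkpoint), so the call reduces to:
 *
 *	if (ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0) < 0)
 *		perror("F2FS_IOC_WRITE_CHECKPOINT");
 */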
1639
1640static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
1641					struct file *filp,
1642					struct f2fs_defragment *range)
1643{
1644	struct inode *inode = file_inode(filp);
1645	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
1646	struct extent_info ei;
1647	pgoff_t pg_start, pg_end;
1648	unsigned int blk_per_seg = sbi->blocks_per_seg;
1649	unsigned int total = 0, sec_num;
1650	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
1651	block_t blk_end = 0;
1652	bool fragmented = false;
1653	int err;
1654
1655	/* if in-place-update policy is enabled, don't waste time here */
1656	if (need_inplace_update(inode))
1657		return -EINVAL;
1658
1659	pg_start = range->start >> PAGE_SHIFT;
1660	pg_end = (range->start + range->len) >> PAGE_SHIFT;
1661
1662	f2fs_balance_fs(sbi, true);
1663
1664	inode_lock(inode);
1665
1666	/* writeback all dirty pages in the range */
1667	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
1668						range->start + range->len - 1);
1669	if (err)
1670		goto out;
1671
1672	/*
1673	 * look up mapping info in the extent cache; skip defragmenting if the
1674	 * physical block addresses are already contiguous.
1675	 */
1676	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
1677		if (ei.fofs + ei.len >= pg_end)
1678			goto out;
1679	}
1680
1681	map.m_lblk = pg_start;
1682
1683	/*
1684	 * look up mapping info in the dnode page cache; skip defragmenting if
1685	 * all physical block addresses are contiguous, even when there are
1686	 * holes in the logical blocks.
1687	 */
1688	while (map.m_lblk < pg_end) {
1689		map.m_len = pg_end - map.m_lblk;
1690		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
1691		if (err)
1692			goto out;
1693
1694		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1695			map.m_lblk++;
1696			continue;
1697		}
1698
1699		if (blk_end && blk_end != map.m_pblk) {
1700			fragmented = true;
1701			break;
1702		}
1703		blk_end = map.m_pblk + map.m_len;
1704
1705		map.m_lblk += map.m_len;
1706	}
1707
1708	if (!fragmented)
1709		goto out;
1710
1711	map.m_lblk = pg_start;
1712	map.m_len = pg_end - pg_start;
1713
1714	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;
1715
1716	/*
1717	 * make sure there are enough free sections for LFS allocation; this
1718	 * avoids running defragment in SSR mode when free sections are being
1719	 * allocated intensively
1720	 */
1721	if (has_not_enough_free_secs(sbi, sec_num)) {
1722		err = -EAGAIN;
1723		goto out;
1724	}
1725
1726	while (map.m_lblk < pg_end) {
1727		pgoff_t idx;
1728		int cnt = 0;
1729
1730do_map:
1731		map.m_len = pg_end - map.m_lblk;
1732		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
1733		if (err)
1734			goto clear_out;
1735
1736		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1737			map.m_lblk++;
1738			continue;
1739		}
1740
1741		set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
1742
1743		idx = map.m_lblk;
1744		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
1745			struct page *page;
1746
1747			page = get_lock_data_page(inode, idx, true);
1748			if (IS_ERR(page)) {
1749				err = PTR_ERR(page);
1750				goto clear_out;
1751			}
1752
1753			set_page_dirty(page);
1754			f2fs_put_page(page, 1);
1755
1756			idx++;
1757			cnt++;
1758			total++;
1759		}
1760
1761		map.m_lblk = idx;
1762
1763		if (idx < pg_end && cnt < blk_per_seg)
1764			goto do_map;
1765
1766		clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
1767
1768		err = filemap_fdatawrite(inode->i_mapping);
1769		if (err)
1770			goto out;
1771	}
1772clear_out:
1773	clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
1774out:
1775	inode_unlock(inode);
1776	if (!err)
1777		range->len = (u64)total << PAGE_SHIFT;
1778	return err;
1779}
1780
1781static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
1782{
1783	struct inode *inode = file_inode(filp);
1784	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1785	struct f2fs_defragment range;
1786	int err;
1787
1788	if (!capable(CAP_SYS_ADMIN))
1789		return -EPERM;
1790
1791	if (!S_ISREG(inode->i_mode))
1792		return -EINVAL;
1793
1794	err = mnt_want_write_file(filp);
1795	if (err)
1796		return err;
1797
1798	if (f2fs_readonly(sbi->sb)) {
1799		err = -EROFS;
1800		goto out;
1801	}
1802
1803	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
1804							sizeof(range))) {
1805		err = -EFAULT;
1806		goto out;
1807	}
1808
1809	/* verify alignment of offset & size */
1810	if (range.start & (F2FS_BLKSIZE - 1) ||
1811		range.len & (F2FS_BLKSIZE - 1)) {
1812		err = -EINVAL;
1813		goto out;
1814	}
1815
1816	err = f2fs_defragment_range(sbi, filp, &range);
1817	f2fs_update_time(sbi, REQ_TIME);
1818	if (err < 0)
1819		goto out;
1820
1821	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
1822							sizeof(range)))
1823		err = -EFAULT;
1824out:
1825	mnt_drop_write_file(filp);
1826	return err;
1827}
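/*
 * Illustrative sketch, not part of this file: userspace describes the byte
 * range to defragment with struct f2fs_defragment; start and len must be
 * block aligned (see the check above), and on success len is rewritten to
 * the number of bytes actually marked dirty for rewrite.
 *
 *	struct f2fs_defragment df = {
 *		.start = 0,
 *		.len = 16 << 20,	// first 16 MiB of the file
 *	};
 *
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df) == 0)
 *		printf("defragmented %llu bytes\n",
 *		       (unsigned long long)df.len);
 */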
1828
1829long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1830{
1831	switch (cmd) {
1832	case F2FS_IOC_GETFLAGS:
1833		return f2fs_ioc_getflags(filp, arg);
1834	case F2FS_IOC_SETFLAGS:
1835		return f2fs_ioc_setflags(filp, arg);
1836	case F2FS_IOC_GETVERSION:
1837		return f2fs_ioc_getversion(filp, arg);
1838	case F2FS_IOC_START_ATOMIC_WRITE:
1839		return f2fs_ioc_start_atomic_write(filp);
1840	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
1841		return f2fs_ioc_commit_atomic_write(filp);
1842	case F2FS_IOC_START_VOLATILE_WRITE:
1843		return f2fs_ioc_start_volatile_write(filp);
1844	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
1845		return f2fs_ioc_release_volatile_write(filp);
1846	case F2FS_IOC_ABORT_VOLATILE_WRITE:
1847		return f2fs_ioc_abort_volatile_write(filp);
1848	case F2FS_IOC_SHUTDOWN:
1849		return f2fs_ioc_shutdown(filp, arg);
1850	case FITRIM:
1851		return f2fs_ioc_fitrim(filp, arg);
1852	case F2FS_IOC_SET_ENCRYPTION_POLICY:
1853		return f2fs_ioc_set_encryption_policy(filp, arg);
1854	case F2FS_IOC_GET_ENCRYPTION_POLICY:
1855		return f2fs_ioc_get_encryption_policy(filp, arg);
1856	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
1857		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
1858	case F2FS_IOC_GARBAGE_COLLECT:
1859		return f2fs_ioc_gc(filp, arg);
1860	case F2FS_IOC_WRITE_CHECKPOINT:
1861		return f2fs_ioc_write_checkpoint(filp, arg);
1862	case F2FS_IOC_DEFRAGMENT:
1863		return f2fs_ioc_defragment(filp, arg);
1864	default:
1865		return -ENOTTY;
1866	}
1867}
1868
1869static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1870{
1871	struct file *file = iocb->ki_filp;
1872	struct inode *inode = file_inode(file);
1873	ssize_t ret;
1874
1875	if (f2fs_encrypted_inode(inode) &&
1876				!fscrypt_has_encryption_key(inode) &&
1877				fscrypt_get_encryption_info(inode))
1878		return -EACCES;
1879
1880	inode_lock(inode);
1881	ret = generic_write_checks(iocb, from);
1882	if (ret > 0) {
1883		ret = f2fs_preallocate_blocks(iocb, from);
1884		if (!ret)
1885			ret = __generic_file_write_iter(iocb, from);
1886	}
1887	inode_unlock(inode);
1888
1889	if (ret > 0) {
1890		ssize_t err;
1891
1892		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
1893		if (err < 0)
1894			ret = err;
1895	}
1896	return ret;
1897}
1898
1899#ifdef CONFIG_COMPAT
1900long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1901{
1902	switch (cmd) {
1903	case F2FS_IOC32_GETFLAGS:
1904		cmd = F2FS_IOC_GETFLAGS;
1905		break;
1906	case F2FS_IOC32_SETFLAGS:
1907		cmd = F2FS_IOC_SETFLAGS;
1908		break;
1909	case F2FS_IOC32_GETVERSION:
1910		cmd = F2FS_IOC_GETVERSION;
1911		break;
1912	case F2FS_IOC_START_ATOMIC_WRITE:
1913	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
1914	case F2FS_IOC_START_VOLATILE_WRITE:
1915	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
1916	case F2FS_IOC_ABORT_VOLATILE_WRITE:
1917	case F2FS_IOC_SHUTDOWN:
1918	case F2FS_IOC_SET_ENCRYPTION_POLICY:
1919	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
1920	case F2FS_IOC_GET_ENCRYPTION_POLICY:
1921	case F2FS_IOC_GARBAGE_COLLECT:
1922	case F2FS_IOC_WRITE_CHECKPOINT:
1923	case F2FS_IOC_DEFRAGMENT:
1924		break;
1925	default:
1926		return -ENOIOCTLCMD;
1927	}
1928	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
1929}
1930#endif
1931
1932const struct file_operations f2fs_file_operations = {
1933	.llseek		= f2fs_llseek,
1934	.read_iter	= generic_file_read_iter,
1935	.write_iter	= f2fs_file_write_iter,
1936	.open		= f2fs_file_open,
1937	.release	= f2fs_release_file,
1938	.mmap		= f2fs_file_mmap,
1939	.fsync		= f2fs_sync_file,
1940	.fallocate	= f2fs_fallocate,
1941	.unlocked_ioctl	= f2fs_ioctl,
1942#ifdef CONFIG_COMPAT
1943	.compat_ioctl	= f2fs_compat_ioctl,
1944#endif
1945	.splice_read	= generic_file_splice_read,
1946	.splice_write	= iter_file_splice_write,
1947};