   1/*
   2 * inode.c - NILFS inode operations.
   3 *
   4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  19 *
  20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
  21 *
  22 */
  23
  24#include <linux/buffer_head.h>
  25#include <linux/gfp.h>
  26#include <linux/mpage.h>
  27#include <linux/writeback.h>
  28#include <linux/aio.h>
  29#include "nilfs.h"
  30#include "btnode.h"
  31#include "segment.h"
  32#include "page.h"
  33#include "mdt.h"
  34#include "cpfile.h"
  35#include "ifile.h"
  36
  37/**
  38 * struct nilfs_iget_args - arguments used during comparison between inodes
  39 * @ino: inode number
  40 * @cno: checkpoint number
  41 * @root: pointer on NILFS root object (mounted checkpoint)
  42 * @for_gc: inode for GC flag
  43 */
  44struct nilfs_iget_args {
  45	u64 ino;
  46	__u64 cno;
  47	struct nilfs_root *root;
  48	int for_gc;
  49};
  50
  51void nilfs_inode_add_blocks(struct inode *inode, int n)
  52{
  53	struct nilfs_root *root = NILFS_I(inode)->i_root;
  54
  55	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
  56	if (root)
  57		atomic64_add(n, &root->blocks_count);
  58}
  59
  60void nilfs_inode_sub_blocks(struct inode *inode, int n)
  61{
  62	struct nilfs_root *root = NILFS_I(inode)->i_root;
  63
  64	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
  65	if (root)
  66		atomic64_sub(n, &root->blocks_count);
  67}
  68
  69/**
  70 * nilfs_get_block() - get a file block on the filesystem (callback function)
  71 * @inode - inode struct of the target file
  72 * @blkoff - file block number
  73 * @bh_result - buffer head to be mapped on
  74 * @create - indicate whether allocating the block or not when it has not
  75 *      been allocated yet.
  76 *
   77 * This function does not issue an actual read request for the specified
   78 * data block; that is done by the VFS.
  79 */
  80int nilfs_get_block(struct inode *inode, sector_t blkoff,
  81		    struct buffer_head *bh_result, int create)
  82{
  83	struct nilfs_inode_info *ii = NILFS_I(inode);
  84	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
  85	__u64 blknum = 0;
  86	int err = 0, ret;
  87	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
  88
  89	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  90	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
  91	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  92	if (ret >= 0) {	/* found */
  93		map_bh(bh_result, inode->i_sb, blknum);
  94		if (ret > 0)
  95			bh_result->b_size = (ret << inode->i_blkbits);
  96		goto out;
  97	}
  98	/* data block was not found */
  99	if (ret == -ENOENT && create) {
 100		struct nilfs_transaction_info ti;
 101
 102		bh_result->b_blocknr = 0;
 103		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
 104		if (unlikely(err))
 105			goto out;
 106		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
 107					(unsigned long)bh_result);
 108		if (unlikely(err != 0)) {
 109			if (err == -EEXIST) {
 110				/*
 111				 * The get_block() function could be called
 112				 * from multiple callers for an inode.
 113				 * However, the page having this block must
 114				 * be locked in this case.
 115				 */
 116				printk(KERN_WARNING
 117				       "nilfs_get_block: a race condition "
 118				       "while inserting a data block. "
 119				       "(inode number=%lu, file block "
 120				       "offset=%llu)\n",
 121				       inode->i_ino,
 122				       (unsigned long long)blkoff);
 123				err = 0;
 124			}
 125			nilfs_transaction_abort(inode->i_sb);
 126			goto out;
 127		}
 128		nilfs_mark_inode_dirty(inode);
 129		nilfs_transaction_commit(inode->i_sb); /* never fails */
 130		/* Error handling should be detailed */
 131		set_buffer_new(bh_result);
 132		set_buffer_delay(bh_result);
 133		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
 134						      to proper value */
 135	} else if (ret == -ENOENT) {
 136		/* not found is not error (e.g. hole); must return without
 137		   the mapped state flag. */
 138		;
 139	} else {
 140		err = ret;
 141	}
 142
 143 out:
 144	return err;
 145}
 146
 147/**
 148 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 149 * address_space_operations.
 150 * @file - file struct of the file to be read
 151 * @page - the page to be read
 152 */
 153static int nilfs_readpage(struct file *file, struct page *page)
 154{
 155	return mpage_readpage(page, nilfs_get_block);
 156}
 157
 158/**
 159 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 160 * address_space_operations.
 161 * @file - file struct of the file to be read
 162 * @mapping - address_space struct used for reading multiple pages
 163 * @pages - the pages to be read
 164 * @nr_pages - number of pages to be read
 165 */
 166static int nilfs_readpages(struct file *file, struct address_space *mapping,
 167			   struct list_head *pages, unsigned nr_pages)
 168{
 169	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
 170}
 171
 172static int nilfs_writepages(struct address_space *mapping,
 173			    struct writeback_control *wbc)
 174{
 175	struct inode *inode = mapping->host;
 176	int err = 0;
 177
 178	if (inode->i_sb->s_flags & MS_RDONLY) {
 179		nilfs_clear_dirty_pages(mapping, false);
 180		return -EROFS;
 181	}
 182
 183	if (wbc->sync_mode == WB_SYNC_ALL)
 184		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 185						    wbc->range_start,
 186						    wbc->range_end);
 187	return err;
 188}
 189
 190static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 191{
 192	struct inode *inode = page->mapping->host;
 193	int err;
 194
 195	if (inode->i_sb->s_flags & MS_RDONLY) {
 196		/*
 197		 * It means that filesystem was remounted in read-only
 198		 * mode because of error or metadata corruption. But we
 199		 * have dirty pages that try to be flushed in background.
 200		 * So, here we simply discard this dirty page.
 201		 */
 202		nilfs_clear_dirty_page(page, false);
 203		unlock_page(page);
 204		return -EROFS;
 205	}
 206
 207	redirty_page_for_writepage(wbc, page);
 208	unlock_page(page);
 209
 210	if (wbc->sync_mode == WB_SYNC_ALL) {
 211		err = nilfs_construct_segment(inode->i_sb);
 212		if (unlikely(err))
 213			return err;
 214	} else if (wbc->for_reclaim)
 215		nilfs_flush_segment(inode->i_sb, inode->i_ino);
 216
 217	return 0;
 218}
 219
 220static int nilfs_set_page_dirty(struct page *page)
 221{
 222	int ret = __set_page_dirty_nobuffers(page);
 223
 224	if (page_has_buffers(page)) {
 225		struct inode *inode = page->mapping->host;
 226		unsigned nr_dirty = 0;
 227		struct buffer_head *bh, *head;
 228
 229		/*
 230		 * This page is locked by callers, and no other thread
 231		 * concurrently marks its buffers dirty since they are
 232		 * only dirtied through routines in fs/buffer.c in
 233		 * which call sites of mark_buffer_dirty are protected
 234		 * by page lock.
 235		 */
 236		bh = head = page_buffers(page);
 237		do {
 238			/* Do not mark hole blocks dirty */
 239			if (buffer_dirty(bh) || !buffer_mapped(bh))
 240				continue;
 241
 242			set_buffer_dirty(bh);
 243			nr_dirty++;
 244		} while (bh = bh->b_this_page, bh != head);
 245
 246		if (nr_dirty)
 247			nilfs_set_file_dirty(inode, nr_dirty);
 248	}
 249	return ret;
 250}
 251
 252void nilfs_write_failed(struct address_space *mapping, loff_t to)
 253{
 254	struct inode *inode = mapping->host;
 255
 256	if (to > inode->i_size) {
 257		truncate_pagecache(inode, inode->i_size);
 258		nilfs_truncate(inode);
 259	}
 260}
 261
 262static int nilfs_write_begin(struct file *file, struct address_space *mapping,
 263			     loff_t pos, unsigned len, unsigned flags,
 264			     struct page **pagep, void **fsdata)
 265
 266{
 267	struct inode *inode = mapping->host;
 268	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
 269
 270	if (unlikely(err))
 271		return err;
 272
 273	err = block_write_begin(mapping, pos, len, flags, pagep,
 274				nilfs_get_block);
 275	if (unlikely(err)) {
 276		nilfs_write_failed(mapping, pos + len);
 277		nilfs_transaction_abort(inode->i_sb);
 278	}
 279	return err;
 280}
 281
 282static int nilfs_write_end(struct file *file, struct address_space *mapping,
 283			   loff_t pos, unsigned len, unsigned copied,
 284			   struct page *page, void *fsdata)
 285{
 286	struct inode *inode = mapping->host;
 287	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
 288	unsigned nr_dirty;
 289	int err;
 290
 291	nr_dirty = nilfs_page_count_clean_buffers(page, start,
 292						  start + copied);
 293	copied = generic_write_end(file, mapping, pos, len, copied, page,
 294				   fsdata);
 295	nilfs_set_file_dirty(inode, nr_dirty);
 296	err = nilfs_transaction_commit(inode->i_sb);
 297	return err ? : copied;
 298}
 299
 300static ssize_t
 301nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 302		loff_t offset, unsigned long nr_segs)
 303{
 304	struct file *file = iocb->ki_filp;
 305	struct address_space *mapping = file->f_mapping;
 306	struct inode *inode = file->f_mapping->host;
 307	ssize_t size;
 308
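	/*
	 * Direct writes are not supported; returning zero makes the VFS
	 * fall back to the buffered write path.
	 */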
 309	if (rw == WRITE)
 310		return 0;
 311
 312	/* Needs synchronization with the cleaner */
 313	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
 314				  nilfs_get_block);
 315
 316	/*
 317	 * In case of error extending write may have instantiated a few
 318	 * blocks outside i_size. Trim these off again.
 319	 */
 320	if (unlikely((rw & WRITE) && size < 0)) {
 321		loff_t isize = i_size_read(inode);
 322		loff_t end = offset + iov_length(iov, nr_segs);
 323
 324		if (end > isize)
 325			nilfs_write_failed(mapping, end);
 326	}
 327
 328	return size;
 329}
 330
 331const struct address_space_operations nilfs_aops = {
 332	.writepage		= nilfs_writepage,
 333	.readpage		= nilfs_readpage,
 334	.writepages		= nilfs_writepages,
 335	.set_page_dirty		= nilfs_set_page_dirty,
 336	.readpages		= nilfs_readpages,
 337	.write_begin		= nilfs_write_begin,
 338	.write_end		= nilfs_write_end,
 339	/* .releasepage		= nilfs_releasepage, */
 340	.invalidatepage		= block_invalidatepage,
 341	.direct_IO		= nilfs_direct_IO,
 342	.is_partially_uptodate  = block_is_partially_uptodate,
 343};
 344
 345struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 346{
 347	struct super_block *sb = dir->i_sb;
 348	struct the_nilfs *nilfs = sb->s_fs_info;
 349	struct inode *inode;
 350	struct nilfs_inode_info *ii;
 351	struct nilfs_root *root;
 352	int err = -ENOMEM;
 353	ino_t ino;
 354
 355	inode = new_inode(sb);
 356	if (unlikely(!inode))
 357		goto failed;
 358
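	/*
	 * Clear __GFP_FS so page cache allocations for this inode cannot
	 * recurse into the filesystem under memory reclaim.
	 */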
 359	mapping_set_gfp_mask(inode->i_mapping,
 360			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 361
 362	root = NILFS_I(dir)->i_root;
 363	ii = NILFS_I(inode);
 364	ii->i_state = 1 << NILFS_I_NEW;
 365	ii->i_root = root;
 366
 367	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 368	if (unlikely(err))
 369		goto failed_ifile_create_inode;
 370	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
 371
 372	atomic64_inc(&root->inodes_count);
 373	inode_init_owner(inode, dir, mode);
 374	inode->i_ino = ino;
 375	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 376
 377	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 378		err = nilfs_bmap_read(ii->i_bmap, NULL);
 379		if (err < 0)
 380			goto failed_bmap;
 381
 382		set_bit(NILFS_I_BMAP, &ii->i_state);
 383		/* No lock is needed; iget() ensures it. */
 384	}
 385
 386	ii->i_flags = nilfs_mask_flags(
 387		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
 388
 389	/* ii->i_file_acl = 0; */
 390	/* ii->i_dir_acl = 0; */
 391	ii->i_dir_start_lookup = 0;
 392	nilfs_set_inode_flags(inode);
 393	spin_lock(&nilfs->ns_next_gen_lock);
 394	inode->i_generation = nilfs->ns_next_generation++;
 395	spin_unlock(&nilfs->ns_next_gen_lock);
 396	insert_inode_hash(inode);
 397
 398	err = nilfs_init_acl(inode, dir);
 399	if (unlikely(err))
 400		goto failed_acl; /* never occur. When supporting
 401				    nilfs_init_acl(), proper cancellation of
 402				    above jobs should be considered */
 403
 404	return inode;
 405
 406 failed_acl:
 407 failed_bmap:
 408	clear_nlink(inode);
 409	iput(inode);  /* raw_inode will be deleted through
 410			 generic_delete_inode() */
 411	goto failed;
 412
 413 failed_ifile_create_inode:
 414	make_bad_inode(inode);
 415	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
 416			 called */
 417 failed:
 418	return ERR_PTR(err);
 419}
 420
 421void nilfs_set_inode_flags(struct inode *inode)
 422{
 423	unsigned int flags = NILFS_I(inode)->i_flags;
 424
 425	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
 426			    S_DIRSYNC);
 427	if (flags & FS_SYNC_FL)
 428		inode->i_flags |= S_SYNC;
 429	if (flags & FS_APPEND_FL)
 430		inode->i_flags |= S_APPEND;
 431	if (flags & FS_IMMUTABLE_FL)
 432		inode->i_flags |= S_IMMUTABLE;
 433	if (flags & FS_NOATIME_FL)
 434		inode->i_flags |= S_NOATIME;
 435	if (flags & FS_DIRSYNC_FL)
 436		inode->i_flags |= S_DIRSYNC;
 437	mapping_set_gfp_mask(inode->i_mapping,
 438			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 439}
 440
 441int nilfs_read_inode_common(struct inode *inode,
 442			    struct nilfs_inode *raw_inode)
 443{
 444	struct nilfs_inode_info *ii = NILFS_I(inode);
 445	int err;
 446
 447	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 448	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
 449	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
 450	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 451	inode->i_size = le64_to_cpu(raw_inode->i_size);
 452	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 453	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
 454	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 455	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 456	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 457	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 458	if (inode->i_nlink == 0 && inode->i_mode == 0)
 459		return -EINVAL; /* this inode is deleted */
 460
 461	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
 462	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
 463#if 0
 464	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
 465	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
 466		0 : le32_to_cpu(raw_inode->i_dir_acl);
 467#endif
 468	ii->i_dir_start_lookup = 0;
 469	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 470
 471	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 472	    S_ISLNK(inode->i_mode)) {
 473		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
 474		if (err < 0)
 475			return err;
 476		set_bit(NILFS_I_BMAP, &ii->i_state);
 477		/* No lock is needed; iget() ensures it. */
 478	}
 479	return 0;
 480}
 481
 482static int __nilfs_read_inode(struct super_block *sb,
 483			      struct nilfs_root *root, unsigned long ino,
 484			      struct inode *inode)
 485{
 486	struct the_nilfs *nilfs = sb->s_fs_info;
 487	struct buffer_head *bh;
 488	struct nilfs_inode *raw_inode;
 489	int err;
 490
 491	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 492	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 493	if (unlikely(err))
 494		goto bad_inode;
 495
 496	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
 497
 498	err = nilfs_read_inode_common(inode, raw_inode);
 499	if (err)
 500		goto failed_unmap;
 501
 502	if (S_ISREG(inode->i_mode)) {
 503		inode->i_op = &nilfs_file_inode_operations;
 504		inode->i_fop = &nilfs_file_operations;
 505		inode->i_mapping->a_ops = &nilfs_aops;
 506	} else if (S_ISDIR(inode->i_mode)) {
 507		inode->i_op = &nilfs_dir_inode_operations;
 508		inode->i_fop = &nilfs_dir_operations;
 509		inode->i_mapping->a_ops = &nilfs_aops;
 510	} else if (S_ISLNK(inode->i_mode)) {
 511		inode->i_op = &nilfs_symlink_inode_operations;
 512		inode->i_mapping->a_ops = &nilfs_aops;
 513	} else {
 514		inode->i_op = &nilfs_special_inode_operations;
 515		init_special_inode(
 516			inode, inode->i_mode,
 517			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
 518	}
 519	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 520	brelse(bh);
 521	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 522	nilfs_set_inode_flags(inode);
 523	return 0;
 524
 525 failed_unmap:
 526	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 527	brelse(bh);
 528
 529 bad_inode:
 530	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 531	return err;
 532}
 533
 534static int nilfs_iget_test(struct inode *inode, void *opaque)
 535{
 536	struct nilfs_iget_args *args = opaque;
 537	struct nilfs_inode_info *ii;
 538
 539	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
 540		return 0;
 541
 542	ii = NILFS_I(inode);
 543	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 544		return !args->for_gc;
 545
 546	return args->for_gc && args->cno == ii->i_cno;
 547}
 548
 549static int nilfs_iget_set(struct inode *inode, void *opaque)
 550{
 551	struct nilfs_iget_args *args = opaque;
 552
 553	inode->i_ino = args->ino;
 554	if (args->for_gc) {
 555		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
 556		NILFS_I(inode)->i_cno = args->cno;
 557		NILFS_I(inode)->i_root = NULL;
 558	} else {
 559		if (args->root && args->ino == NILFS_ROOT_INO)
 560			nilfs_get_root(args->root);
 561		NILFS_I(inode)->i_root = args->root;
 562	}
 563	return 0;
 564}
 565
 566struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 567			    unsigned long ino)
 568{
 569	struct nilfs_iget_args args = {
 570		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 571	};
 572
 573	return ilookup5(sb, ino, nilfs_iget_test, &args);
 574}
 575
 576struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 577				unsigned long ino)
 578{
 579	struct nilfs_iget_args args = {
 580		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 581	};
 582
 583	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 584}
 585
 586struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
 587			 unsigned long ino)
 588{
 589	struct inode *inode;
 590	int err;
 591
 592	inode = nilfs_iget_locked(sb, root, ino);
 593	if (unlikely(!inode))
 594		return ERR_PTR(-ENOMEM);
 595	if (!(inode->i_state & I_NEW))
 596		return inode;
 597
 598	err = __nilfs_read_inode(sb, root, ino, inode);
 599	if (unlikely(err)) {
 600		iget_failed(inode);
 601		return ERR_PTR(err);
 602	}
 603	unlock_new_inode(inode);
 604	return inode;
 605}
 606
 607struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 608				__u64 cno)
 609{
 610	struct nilfs_iget_args args = {
 611		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 612	};
 613	struct inode *inode;
 614	int err;
 615
 616	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 617	if (unlikely(!inode))
 618		return ERR_PTR(-ENOMEM);
 619	if (!(inode->i_state & I_NEW))
 620		return inode;
 621
 622	err = nilfs_init_gcinode(inode);
 623	if (unlikely(err)) {
 624		iget_failed(inode);
 625		return ERR_PTR(err);
 626	}
 627	unlock_new_inode(inode);
 628	return inode;
 629}
 630
 631void nilfs_write_inode_common(struct inode *inode,
 632			      struct nilfs_inode *raw_inode, int has_bmap)
 633{
 634	struct nilfs_inode_info *ii = NILFS_I(inode);
 635
 636	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 637	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
 638	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
 639	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 640	raw_inode->i_size = cpu_to_le64(inode->i_size);
 641	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 642	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
 643	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 644	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 645	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
 646
 647	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
 648	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 649
 650	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
 651		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 652
 653		/* zero-fill unused portion in the case of super root block */
 654		raw_inode->i_xattr = 0;
 655		raw_inode->i_pad = 0;
 656		memset((void *)raw_inode + sizeof(*raw_inode), 0,
 657		       nilfs->ns_inode_size - sizeof(*raw_inode));
 658	}
 659
 660	if (has_bmap)
 661		nilfs_bmap_write(ii->i_bmap, raw_inode);
 662	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 663		raw_inode->i_device_code =
 664			cpu_to_le64(huge_encode_dev(inode->i_rdev));
 665	/* When extending inode, nilfs->ns_inode_size should be checked
 666	   for substitutions of appended fields */
 667}
 668
 669void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
 670{
 671	ino_t ino = inode->i_ino;
 672	struct nilfs_inode_info *ii = NILFS_I(inode);
 673	struct inode *ifile = ii->i_root->ifile;
 674	struct nilfs_inode *raw_inode;
 675
 676	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
 677
 678	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 679		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
 680	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
 681
 682	nilfs_write_inode_common(inode, raw_inode, 0);
 683		/* XXX: call with has_bmap = 0 is a workaround to avoid
 684		   deadlock of bmap. This delays update of i_bmap to just
 685		   before writing */
 686	nilfs_ifile_unmap_inode(ifile, ino, ibh);
 687}
 688
 689#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
 690
 691static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
 692				unsigned long from)
 693{
 694	unsigned long b;
 695	int ret;
 696
 697	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 698		return;
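	/*
	 * Remove mappings in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS
	 * blocks per pass, letting nilfs_relax_pressure_in_lock() relieve
	 * segment construction pressure between passes.
	 */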
 699repeat:
 700	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 701	if (ret == -ENOENT)
 702		return;
 703	else if (ret < 0)
 704		goto failed;
 705
 706	if (b < from)
 707		return;
 708
 709	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
 710	ret = nilfs_bmap_truncate(ii->i_bmap, b);
 711	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
 712	if (!ret || (ret == -ENOMEM &&
 713		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 714		goto repeat;
 715
 716failed:
 717	nilfs_warning(ii->vfs_inode.i_sb, __func__,
 718		      "failed to truncate bmap (ino=%lu, err=%d)",
 719		      ii->vfs_inode.i_ino, ret);
 720}
 721
 722void nilfs_truncate(struct inode *inode)
 723{
 724	unsigned long blkoff;
 725	unsigned int blocksize;
 726	struct nilfs_transaction_info ti;
 727	struct super_block *sb = inode->i_sb;
 728	struct nilfs_inode_info *ii = NILFS_I(inode);
 729
 730	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 731		return;
 732	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 733		return;
 734
 735	blocksize = sb->s_blocksize;
 736	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
 737	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 738
 739	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 740
 741	nilfs_truncate_bmap(ii, blkoff);
 742
 743	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 744	if (IS_SYNC(inode))
 745		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 746
 747	nilfs_mark_inode_dirty(inode);
 748	nilfs_set_file_dirty(inode, 0);
 749	nilfs_transaction_commit(sb);
 750	/* May construct a logical segment and may fail in sync mode.
 751	   But truncate has no return value. */
 752}
 753
 754static void nilfs_clear_inode(struct inode *inode)
 755{
 756	struct nilfs_inode_info *ii = NILFS_I(inode);
 757	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 758
 759	/*
 760	 * Free resources allocated in nilfs_read_inode(), here.
 761	 */
 762	BUG_ON(!list_empty(&ii->i_dirty));
 763	brelse(ii->i_bh);
 764	ii->i_bh = NULL;
 765
 766	if (mdi && mdi->mi_palloc_cache)
 767		nilfs_palloc_destroy_cache(inode);
 768
 769	if (test_bit(NILFS_I_BMAP, &ii->i_state))
 770		nilfs_bmap_clear(ii->i_bmap);
 771
 772	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 773
 774	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
 775		nilfs_put_root(ii->i_root);
 776}
 777
 778void nilfs_evict_inode(struct inode *inode)
 779{
 780	struct nilfs_transaction_info ti;
 781	struct super_block *sb = inode->i_sb;
 782	struct nilfs_inode_info *ii = NILFS_I(inode);
 783	int ret;
 784
 785	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 786		truncate_inode_pages_final(&inode->i_data);
 787		clear_inode(inode);
 788		nilfs_clear_inode(inode);
 789		return;
 790	}
 791	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 792
 793	truncate_inode_pages_final(&inode->i_data);
 794
 795	/* TODO: some of the following operations may fail.  */
 796	nilfs_truncate_bmap(ii, 0);
 797	nilfs_mark_inode_dirty(inode);
 798	clear_inode(inode);
 799
 800	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 801	if (!ret)
 802		atomic64_dec(&ii->i_root->inodes_count);
 803
 804	nilfs_clear_inode(inode);
 805
 806	if (IS_SYNC(inode))
 807		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 808	nilfs_transaction_commit(sb);
 809	/* May construct a logical segment and may fail in sync mode.
 810	   But delete_inode has no return value. */
 811}
 812
 813int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
 814{
 815	struct nilfs_transaction_info ti;
 816	struct inode *inode = dentry->d_inode;
 817	struct super_block *sb = inode->i_sb;
 818	int err;
 819
 820	err = inode_change_ok(inode, iattr);
 821	if (err)
 822		return err;
 823
 824	err = nilfs_transaction_begin(sb, &ti, 0);
 825	if (unlikely(err))
 826		return err;
 827
 828	if ((iattr->ia_valid & ATTR_SIZE) &&
 829	    iattr->ia_size != i_size_read(inode)) {
 830		inode_dio_wait(inode);
 831		truncate_setsize(inode, iattr->ia_size);
 832		nilfs_truncate(inode);
 833	}
 834
 835	setattr_copy(inode, iattr);
 836	mark_inode_dirty(inode);
 837
 838	if (iattr->ia_valid & ATTR_MODE) {
 839		err = nilfs_acl_chmod(inode);
 840		if (unlikely(err))
 841			goto out_err;
 842	}
 843
 844	return nilfs_transaction_commit(sb);
 845
 846out_err:
 847	nilfs_transaction_abort(sb);
 848	return err;
 849}
 850
 851int nilfs_permission(struct inode *inode, int mask)
 852{
 853	struct nilfs_root *root = NILFS_I(inode)->i_root;
 854	if ((mask & MAY_WRITE) && root &&
 855	    root->cno != NILFS_CPTREE_CURRENT_CNO)
 856		return -EROFS; /* snapshot is not writable */
 857
 858	return generic_permission(inode, mask);
 859}
 860
 861int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 862{
 863	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 864	struct nilfs_inode_info *ii = NILFS_I(inode);
 865	int err;
 866
 867	spin_lock(&nilfs->ns_inode_lock);
 868	if (ii->i_bh == NULL) {
 869		spin_unlock(&nilfs->ns_inode_lock);
 870		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
 871						  inode->i_ino, pbh);
 872		if (unlikely(err))
 873			return err;
 874		spin_lock(&nilfs->ns_inode_lock);
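		/*
		 * Recheck under the lock: another task may have installed
		 * i_bh while the lock was dropped.
		 */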
 875		if (ii->i_bh == NULL)
 876			ii->i_bh = *pbh;
 877		else {
 878			brelse(*pbh);
 879			*pbh = ii->i_bh;
 880		}
 881	} else
 882		*pbh = ii->i_bh;
 883
 884	get_bh(*pbh);
 885	spin_unlock(&nilfs->ns_inode_lock);
 886	return 0;
 887}
 888
 889int nilfs_inode_dirty(struct inode *inode)
 890{
 891	struct nilfs_inode_info *ii = NILFS_I(inode);
 892	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 893	int ret = 0;
 894
 895	if (!list_empty(&ii->i_dirty)) {
 896		spin_lock(&nilfs->ns_inode_lock);
 897		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
 898			test_bit(NILFS_I_BUSY, &ii->i_state);
 899		spin_unlock(&nilfs->ns_inode_lock);
 900	}
 901	return ret;
 902}
 903
 904int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
 905{
 906	struct nilfs_inode_info *ii = NILFS_I(inode);
 907	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 908
 909	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
 910
 911	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
 912		return 0;
 913
 914	spin_lock(&nilfs->ns_inode_lock);
 915	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 916	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
 917		/* Because this routine may race with nilfs_dispose_list(),
 918		   we have to check NILFS_I_QUEUED here, too. */
 919		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
 920			/* This will happen when somebody is freeing
 921			   this inode. */
 922			nilfs_warning(inode->i_sb, __func__,
 923				      "cannot get inode (ino=%lu)\n",
 924				      inode->i_ino);
 925			spin_unlock(&nilfs->ns_inode_lock);
 926			return -EINVAL; /* NILFS_I_DIRTY may remain for
 927					   freeing inode */
 928		}
 929		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 930		set_bit(NILFS_I_QUEUED, &ii->i_state);
 931	}
 932	spin_unlock(&nilfs->ns_inode_lock);
 933	return 0;
 934}
 935
 936int nilfs_mark_inode_dirty(struct inode *inode)
 937{
 938	struct buffer_head *ibh;
 939	int err;
 940
 941	err = nilfs_load_inode_block(inode, &ibh);
 942	if (unlikely(err)) {
 943		nilfs_warning(inode->i_sb, __func__,
 944			      "failed to reget inode block.\n");
 945		return err;
 946	}
 947	nilfs_update_inode(inode, ibh);
 948	mark_buffer_dirty(ibh);
 949	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
 950	brelse(ibh);
 951	return 0;
 952}
 953
 954/**
 955 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 956 * @inode: inode of the file to be registered.
 957 *
  958 * nilfs_dirty_inode() loads an inode block containing the specified
 959 * @inode and copies data from a nilfs_inode to a corresponding inode
 960 * entry in the inode block. This operation is excluded from the segment
 961 * construction. This function can be called both as a single operation
 962 * and as a part of indivisible file operations.
 963 */
 964void nilfs_dirty_inode(struct inode *inode, int flags)
 965{
 966	struct nilfs_transaction_info ti;
 967	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 968
 969	if (is_bad_inode(inode)) {
 970		nilfs_warning(inode->i_sb, __func__,
 971			      "tried to mark bad_inode dirty. ignored.\n");
 972		dump_stack();
 973		return;
 974	}
 975	if (mdi) {
 976		nilfs_mdt_mark_dirty(inode);
 977		return;
 978	}
 979	nilfs_transaction_begin(inode->i_sb, &ti, 0);
 980	nilfs_mark_inode_dirty(inode);
 981	nilfs_transaction_commit(inode->i_sb); /* never fails */
 982}
 983
 984int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 985		 __u64 start, __u64 len)
 986{
 987	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 988	__u64 logical = 0, phys = 0, size = 0;
 989	__u32 flags = 0;
 990	loff_t isize;
 991	sector_t blkoff, end_blkoff;
 992	sector_t delalloc_blkoff;
 993	unsigned long delalloc_blklen;
 994	unsigned int blkbits = inode->i_blkbits;
 995	int ret, n;
 996
 997	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
 998	if (ret)
 999		return ret;
1000
1001	mutex_lock(&inode->i_mutex);
1002
1003	isize = i_size_read(inode);
1004
1005	blkoff = start >> blkbits;
1006	end_blkoff = (start + len - 1) >> blkbits;
1007
1008	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1009							&delalloc_blkoff);
1010
1011	do {
1012		__u64 blkphy;
1013		unsigned int maxblocks;
1014
1015		if (delalloc_blklen && blkoff == delalloc_blkoff) {
1016			if (size) {
1017				/* End of the current extent */
1018				ret = fiemap_fill_next_extent(
1019					fieinfo, logical, phys, size, flags);
1020				if (ret)
1021					break;
1022			}
1023			if (blkoff > end_blkoff)
1024				break;
1025
1026			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1027			logical = blkoff << blkbits;
1028			phys = 0;
1029			size = delalloc_blklen << blkbits;
1030
1031			blkoff = delalloc_blkoff + delalloc_blklen;
1032			delalloc_blklen = nilfs_find_uncommitted_extent(
1033				inode, blkoff, &delalloc_blkoff);
1034			continue;
1035		}
1036
1037		/*
1038		 * Limit the number of blocks that we look up so as
1039		 * not to get into the next delayed allocation extent.
1040		 */
1041		maxblocks = INT_MAX;
1042		if (delalloc_blklen)
1043			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1044					  maxblocks);
1045		blkphy = 0;
1046
1047		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1048		n = nilfs_bmap_lookup_contig(
1049			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1050		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1051
1052		if (n < 0) {
1053			int past_eof;
1054
1055			if (unlikely(n != -ENOENT))
1056				break; /* error */
1057
1058			/* HOLE */
1059			blkoff++;
1060			past_eof = ((blkoff << blkbits) >= isize);
1061
1062			if (size) {
1063				/* End of the current extent */
1064
1065				if (past_eof)
1066					flags |= FIEMAP_EXTENT_LAST;
1067
1068				ret = fiemap_fill_next_extent(
1069					fieinfo, logical, phys, size, flags);
1070				if (ret)
1071					break;
1072				size = 0;
1073			}
1074			if (blkoff > end_blkoff || past_eof)
1075				break;
1076		} else {
1077			if (size) {
1078				if (phys && blkphy << blkbits == phys + size) {
1079					/* The current extent goes on */
1080					size += n << blkbits;
1081				} else {
1082					/* Terminate the current extent */
1083					ret = fiemap_fill_next_extent(
1084						fieinfo, logical, phys, size,
1085						flags);
1086					if (ret || blkoff > end_blkoff)
1087						break;
1088
1089					/* Start another extent */
1090					flags = FIEMAP_EXTENT_MERGED;
1091					logical = blkoff << blkbits;
1092					phys = blkphy << blkbits;
1093					size = n << blkbits;
1094				}
1095			} else {
1096				/* Start a new extent */
1097				flags = FIEMAP_EXTENT_MERGED;
1098				logical = blkoff << blkbits;
1099				phys = blkphy << blkbits;
1100				size = n << blkbits;
1101			}
1102			blkoff += n;
1103		}
1104		cond_resched();
1105	} while (true);
1106
1107	/* If ret is 1 then we just hit the end of the extent array */
1108	if (ret == 1)
1109		ret = 0;
1110
1111	mutex_unlock(&inode->i_mutex);
1112	return ret;
1113}
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c - NILFS inode operations.
   4 *
   5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   6 *
   7 * Written by Ryusuke Konishi.
   8 *
   9 */
  10
  11#include <linux/buffer_head.h>
  12#include <linux/gfp.h>
  13#include <linux/mpage.h>
  14#include <linux/pagemap.h>
  15#include <linux/writeback.h>
  16#include <linux/uio.h>
  17#include <linux/fiemap.h>
  18#include "nilfs.h"
  19#include "btnode.h"
  20#include "segment.h"
  21#include "page.h"
  22#include "mdt.h"
  23#include "cpfile.h"
  24#include "ifile.h"
  25
  26/**
  27 * struct nilfs_iget_args - arguments used during comparison between inodes
  28 * @ino: inode number
  29 * @cno: checkpoint number
  30 * @root: pointer on NILFS root object (mounted checkpoint)
  31 * @for_gc: inode for GC flag
  32 */
  33struct nilfs_iget_args {
  34	u64 ino;
  35	__u64 cno;
  36	struct nilfs_root *root;
  37	int for_gc;
  38};
  39
  40static int nilfs_iget_test(struct inode *inode, void *opaque);
  41
  42void nilfs_inode_add_blocks(struct inode *inode, int n)
  43{
  44	struct nilfs_root *root = NILFS_I(inode)->i_root;
  45
  46	inode_add_bytes(inode, i_blocksize(inode) * n);
  47	if (root)
  48		atomic64_add(n, &root->blocks_count);
  49}
  50
  51void nilfs_inode_sub_blocks(struct inode *inode, int n)
  52{
  53	struct nilfs_root *root = NILFS_I(inode)->i_root;
  54
  55	inode_sub_bytes(inode, i_blocksize(inode) * n);
  56	if (root)
  57		atomic64_sub(n, &root->blocks_count);
  58}
  59
  60/**
  61 * nilfs_get_block() - get a file block on the filesystem (callback function)
  62 * @inode - inode struct of the target file
  63 * @blkoff - file block number
  64 * @bh_result - buffer head to be mapped on
  65 * @create - indicate whether allocating the block or not when it has not
  66 *      been allocated yet.
  67 *
   68 * This function does not issue an actual read request for the specified
   69 * data block; that is done by the VFS.
  70 */
  71int nilfs_get_block(struct inode *inode, sector_t blkoff,
  72		    struct buffer_head *bh_result, int create)
  73{
  74	struct nilfs_inode_info *ii = NILFS_I(inode);
  75	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
  76	__u64 blknum = 0;
  77	int err = 0, ret;
  78	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
  79
  80	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  81	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
  82	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  83	if (ret >= 0) {	/* found */
  84		map_bh(bh_result, inode->i_sb, blknum);
  85		if (ret > 0)
  86			bh_result->b_size = (ret << inode->i_blkbits);
  87		goto out;
  88	}
  89	/* data block was not found */
  90	if (ret == -ENOENT && create) {
  91		struct nilfs_transaction_info ti;
  92
  93		bh_result->b_blocknr = 0;
  94		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
  95		if (unlikely(err))
  96			goto out;
  97		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
  98					(unsigned long)bh_result);
  99		if (unlikely(err != 0)) {
 100			if (err == -EEXIST) {
 101				/*
 102				 * The get_block() function could be called
 103				 * from multiple callers for an inode.
 104				 * However, the page having this block must
 105				 * be locked in this case.
 106				 */
 107				nilfs_warn(inode->i_sb,
 108					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
 109					   __func__, inode->i_ino,
 110					   (unsigned long long)blkoff);
 111				err = 0;
 112			}
 113			nilfs_transaction_abort(inode->i_sb);
 114			goto out;
 115		}
 116		nilfs_mark_inode_dirty_sync(inode);
 117		nilfs_transaction_commit(inode->i_sb); /* never fails */
 118		/* Error handling should be detailed */
 119		set_buffer_new(bh_result);
 120		set_buffer_delay(bh_result);
 121		map_bh(bh_result, inode->i_sb, 0);
 122		/* Disk block number must be changed to proper value */
 123
 124	} else if (ret == -ENOENT) {
 125		/*
 126		 * not found is not error (e.g. hole); must return without
 127		 * the mapped state flag.
 128		 */
 129		;
 130	} else {
 131		err = ret;
 132	}
 133
 134 out:
 135	return err;
 136}
 137
 138/**
 139 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 140 * address_space_operations.
 141 * @file - file struct of the file to be read
 142 * @page - the page to be read
 143 */
 144static int nilfs_readpage(struct file *file, struct page *page)
 145{
 146	return mpage_readpage(page, nilfs_get_block);
 147}
 148
 149static void nilfs_readahead(struct readahead_control *rac)
 150{
 151	mpage_readahead(rac, nilfs_get_block);
 152}
 153
 154static int nilfs_writepages(struct address_space *mapping,
 155			    struct writeback_control *wbc)
 156{
 157	struct inode *inode = mapping->host;
 158	int err = 0;
 159
 160	if (sb_rdonly(inode->i_sb)) {
 161		nilfs_clear_dirty_pages(mapping, false);
 162		return -EROFS;
 163	}
 164
 165	if (wbc->sync_mode == WB_SYNC_ALL)
 166		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 167						    wbc->range_start,
 168						    wbc->range_end);
 169	return err;
 170}
 171
 172static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 173{
 174	struct inode *inode = page->mapping->host;
 175	int err;
 176
 177	if (sb_rdonly(inode->i_sb)) {
 178		/*
 179		 * It means that filesystem was remounted in read-only
 180		 * mode because of error or metadata corruption. But we
 181		 * have dirty pages that try to be flushed in background.
 182		 * So, here we simply discard this dirty page.
 183		 */
 184		nilfs_clear_dirty_page(page, false);
 185		unlock_page(page);
 186		return -EROFS;
 187	}
 188
 189	redirty_page_for_writepage(wbc, page);
 190	unlock_page(page);
 191
 192	if (wbc->sync_mode == WB_SYNC_ALL) {
 193		err = nilfs_construct_segment(inode->i_sb);
 194		if (unlikely(err))
 195			return err;
 196	} else if (wbc->for_reclaim)
 197		nilfs_flush_segment(inode->i_sb, inode->i_ino);
 198
 199	return 0;
 200}
 201
 202static int nilfs_set_page_dirty(struct page *page)
 203{
 204	struct inode *inode = page->mapping->host;
 205	int ret = __set_page_dirty_nobuffers(page);
 206
 207	if (page_has_buffers(page)) {
 208		unsigned int nr_dirty = 0;
 209		struct buffer_head *bh, *head;
 210
 211		/*
 212		 * This page is locked by callers, and no other thread
 213		 * concurrently marks its buffers dirty since they are
 214		 * only dirtied through routines in fs/buffer.c in
 215		 * which call sites of mark_buffer_dirty are protected
 216		 * by page lock.
 217		 */
 218		bh = head = page_buffers(page);
 219		do {
 220			/* Do not mark hole blocks dirty */
 221			if (buffer_dirty(bh) || !buffer_mapped(bh))
 222				continue;
 223
 224			set_buffer_dirty(bh);
 225			nr_dirty++;
 226		} while (bh = bh->b_this_page, bh != head);
 227
 228		if (nr_dirty)
 229			nilfs_set_file_dirty(inode, nr_dirty);
 230	} else if (ret) {
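		/*
		 * The page was dirtied without buffers (e.g. by mmap), so
		 * account a full page worth of blocks as dirty.
		 */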
 231		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 232
 233		nilfs_set_file_dirty(inode, nr_dirty);
 234	}
 235	return ret;
 236}
 237
 238void nilfs_write_failed(struct address_space *mapping, loff_t to)
 239{
 240	struct inode *inode = mapping->host;
 241
 242	if (to > inode->i_size) {
 243		truncate_pagecache(inode, inode->i_size);
 244		nilfs_truncate(inode);
 245	}
 246}
 247
 248static int nilfs_write_begin(struct file *file, struct address_space *mapping,
 249			     loff_t pos, unsigned len, unsigned flags,
 250			     struct page **pagep, void **fsdata)
 251
 252{
 253	struct inode *inode = mapping->host;
 254	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
 255
 256	if (unlikely(err))
 257		return err;
 258
 259	err = block_write_begin(mapping, pos, len, flags, pagep,
 260				nilfs_get_block);
 261	if (unlikely(err)) {
 262		nilfs_write_failed(mapping, pos + len);
 263		nilfs_transaction_abort(inode->i_sb);
 264	}
 265	return err;
 266}
 267
 268static int nilfs_write_end(struct file *file, struct address_space *mapping,
 269			   loff_t pos, unsigned len, unsigned copied,
 270			   struct page *page, void *fsdata)
 271{
 272	struct inode *inode = mapping->host;
 273	unsigned int start = pos & (PAGE_SIZE - 1);
 274	unsigned int nr_dirty;
 275	int err;
 276
 277	nr_dirty = nilfs_page_count_clean_buffers(page, start,
 278						  start + copied);
 279	copied = generic_write_end(file, mapping, pos, len, copied, page,
 280				   fsdata);
 281	nilfs_set_file_dirty(inode, nr_dirty);
 282	err = nilfs_transaction_commit(inode->i_sb);
 283	return err ? : copied;
 284}
 285
 286static ssize_t
 287nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 288{
 289	struct inode *inode = file_inode(iocb->ki_filp);
 290
 291	if (iov_iter_rw(iter) == WRITE)
 292		return 0;
 293
 294	/* Needs synchronization with the cleaner */
 295	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
 296}
 297
 298const struct address_space_operations nilfs_aops = {
 299	.writepage		= nilfs_writepage,
 300	.readpage		= nilfs_readpage,
 301	.writepages		= nilfs_writepages,
 302	.set_page_dirty		= nilfs_set_page_dirty,
 303	.readahead		= nilfs_readahead,
 304	.write_begin		= nilfs_write_begin,
 305	.write_end		= nilfs_write_end,
 306	/* .releasepage		= nilfs_releasepage, */
 307	.invalidatepage		= block_invalidatepage,
 308	.direct_IO		= nilfs_direct_IO,
 309	.is_partially_uptodate  = block_is_partially_uptodate,
 310};
 311
 312static int nilfs_insert_inode_locked(struct inode *inode,
 313				     struct nilfs_root *root,
 314				     unsigned long ino)
 315{
 316	struct nilfs_iget_args args = {
 317		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 318	};
 319
 320	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
 321}
 322
 323struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 324{
 325	struct super_block *sb = dir->i_sb;
 326	struct the_nilfs *nilfs = sb->s_fs_info;
 327	struct inode *inode;
 328	struct nilfs_inode_info *ii;
 329	struct nilfs_root *root;
 330	int err = -ENOMEM;
 331	ino_t ino;
 332
 333	inode = new_inode(sb);
 334	if (unlikely(!inode))
 335		goto failed;
 336
 337	mapping_set_gfp_mask(inode->i_mapping,
 338			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 339
 340	root = NILFS_I(dir)->i_root;
 341	ii = NILFS_I(inode);
 342	ii->i_state = BIT(NILFS_I_NEW);
 343	ii->i_root = root;
 344
 345	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 346	if (unlikely(err))
 347		goto failed_ifile_create_inode;
 348	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
 349
 350	atomic64_inc(&root->inodes_count);
 351	inode_init_owner(inode, dir, mode);
 352	inode->i_ino = ino;
 353	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 354
 355	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 356		err = nilfs_bmap_read(ii->i_bmap, NULL);
 357		if (err < 0)
 358			goto failed_after_creation;
 359
 360		set_bit(NILFS_I_BMAP, &ii->i_state);
 361		/* No lock is needed; iget() ensures it. */
 362	}
 363
 364	ii->i_flags = nilfs_mask_flags(
 365		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
 366
 367	/* ii->i_file_acl = 0; */
 368	/* ii->i_dir_acl = 0; */
 369	ii->i_dir_start_lookup = 0;
 370	nilfs_set_inode_flags(inode);
 371	spin_lock(&nilfs->ns_next_gen_lock);
 372	inode->i_generation = nilfs->ns_next_generation++;
 373	spin_unlock(&nilfs->ns_next_gen_lock);
 374	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
 375		err = -EIO;
 376		goto failed_after_creation;
 377	}
 378
 379	err = nilfs_init_acl(inode, dir);
 380	if (unlikely(err))
 381		/*
 382		 * Never occur.  When supporting nilfs_init_acl(),
 383		 * proper cancellation of above jobs should be considered.
 384		 */
 385		goto failed_after_creation;
 386
 387	return inode;
 388
 389 failed_after_creation:
 390	clear_nlink(inode);
 391	if (inode->i_state & I_NEW)
 392		unlock_new_inode(inode);
 393	iput(inode);  /*
 394		       * raw_inode will be deleted through
 395		       * nilfs_evict_inode().
 396		       */
 397	goto failed;
 398
 399 failed_ifile_create_inode:
 400	make_bad_inode(inode);
 401	iput(inode);
 402 failed:
 403	return ERR_PTR(err);
 404}
 405
 406void nilfs_set_inode_flags(struct inode *inode)
 407{
 408	unsigned int flags = NILFS_I(inode)->i_flags;
 409	unsigned int new_fl = 0;
 410
 411	if (flags & FS_SYNC_FL)
 412		new_fl |= S_SYNC;
 413	if (flags & FS_APPEND_FL)
 414		new_fl |= S_APPEND;
 415	if (flags & FS_IMMUTABLE_FL)
 416		new_fl |= S_IMMUTABLE;
 417	if (flags & FS_NOATIME_FL)
 418		new_fl |= S_NOATIME;
 419	if (flags & FS_DIRSYNC_FL)
 420		new_fl |= S_DIRSYNC;
 421	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
 422			S_NOATIME | S_DIRSYNC);
 423}
 424
 425int nilfs_read_inode_common(struct inode *inode,
 426			    struct nilfs_inode *raw_inode)
 427{
 428	struct nilfs_inode_info *ii = NILFS_I(inode);
 429	int err;
 430
 431	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 432	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
 433	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
 434	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 435	inode->i_size = le64_to_cpu(raw_inode->i_size);
 436	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 437	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
 438	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 439	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 440	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 441	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 442	if (inode->i_nlink == 0)
 443		return -ESTALE; /* this inode is deleted */
 444
 445	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
 446	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
 447#if 0
 448	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
 449	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
 450		0 : le32_to_cpu(raw_inode->i_dir_acl);
 451#endif
 452	ii->i_dir_start_lookup = 0;
 453	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 454
 455	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 456	    S_ISLNK(inode->i_mode)) {
 457		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
 458		if (err < 0)
 459			return err;
 460		set_bit(NILFS_I_BMAP, &ii->i_state);
 461		/* No lock is needed; iget() ensures it. */
 462	}
 463	return 0;
 464}
 465
 466static int __nilfs_read_inode(struct super_block *sb,
 467			      struct nilfs_root *root, unsigned long ino,
 468			      struct inode *inode)
 469{
 470	struct the_nilfs *nilfs = sb->s_fs_info;
 471	struct buffer_head *bh;
 472	struct nilfs_inode *raw_inode;
 473	int err;
 474
 475	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 476	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 477	if (unlikely(err))
 478		goto bad_inode;
 479
 480	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
 481
 482	err = nilfs_read_inode_common(inode, raw_inode);
 483	if (err)
 484		goto failed_unmap;
 485
 486	if (S_ISREG(inode->i_mode)) {
 487		inode->i_op = &nilfs_file_inode_operations;
 488		inode->i_fop = &nilfs_file_operations;
 489		inode->i_mapping->a_ops = &nilfs_aops;
 490	} else if (S_ISDIR(inode->i_mode)) {
 491		inode->i_op = &nilfs_dir_inode_operations;
 492		inode->i_fop = &nilfs_dir_operations;
 493		inode->i_mapping->a_ops = &nilfs_aops;
 494	} else if (S_ISLNK(inode->i_mode)) {
 495		inode->i_op = &nilfs_symlink_inode_operations;
 496		inode_nohighmem(inode);
 497		inode->i_mapping->a_ops = &nilfs_aops;
 498	} else {
 499		inode->i_op = &nilfs_special_inode_operations;
 500		init_special_inode(
 501			inode, inode->i_mode,
 502			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
 503	}
 504	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 505	brelse(bh);
 506	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 507	nilfs_set_inode_flags(inode);
 508	mapping_set_gfp_mask(inode->i_mapping,
 509			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 510	return 0;
 511
 512 failed_unmap:
 513	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 514	brelse(bh);
 515
 516 bad_inode:
 517	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 518	return err;
 519}
 520
 521static int nilfs_iget_test(struct inode *inode, void *opaque)
 522{
 523	struct nilfs_iget_args *args = opaque;
 524	struct nilfs_inode_info *ii;
 525
 526	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
 527		return 0;
 528
 529	ii = NILFS_I(inode);
 530	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 531		return !args->for_gc;
 532
 533	return args->for_gc && args->cno == ii->i_cno;
 534}
 535
 536static int nilfs_iget_set(struct inode *inode, void *opaque)
 537{
 538	struct nilfs_iget_args *args = opaque;
 539
 540	inode->i_ino = args->ino;
 541	if (args->for_gc) {
 542		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
 543		NILFS_I(inode)->i_cno = args->cno;
 544		NILFS_I(inode)->i_root = NULL;
 545	} else {
 546		if (args->root && args->ino == NILFS_ROOT_INO)
 547			nilfs_get_root(args->root);
 548		NILFS_I(inode)->i_root = args->root;
 549	}
 550	return 0;
 551}
 552
 553struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 554			    unsigned long ino)
 555{
 556	struct nilfs_iget_args args = {
 557		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 558	};
 559
 560	return ilookup5(sb, ino, nilfs_iget_test, &args);
 561}
 562
 563struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 564				unsigned long ino)
 565{
 566	struct nilfs_iget_args args = {
 567		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 568	};
 569
 570	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 571}
 572
 573struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
 574			 unsigned long ino)
 575{
 576	struct inode *inode;
 577	int err;
 578
 579	inode = nilfs_iget_locked(sb, root, ino);
 580	if (unlikely(!inode))
 581		return ERR_PTR(-ENOMEM);
 582	if (!(inode->i_state & I_NEW))
 583		return inode;
 584
 585	err = __nilfs_read_inode(sb, root, ino, inode);
 586	if (unlikely(err)) {
 587		iget_failed(inode);
 588		return ERR_PTR(err);
 589	}
 590	unlock_new_inode(inode);
 591	return inode;
 592}
 593
 594struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 595				__u64 cno)
 596{
 597	struct nilfs_iget_args args = {
 598		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 599	};
 600	struct inode *inode;
 601	int err;
 602
 603	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 604	if (unlikely(!inode))
 605		return ERR_PTR(-ENOMEM);
 606	if (!(inode->i_state & I_NEW))
 607		return inode;
 608
 609	err = nilfs_init_gcinode(inode);
 610	if (unlikely(err)) {
 611		iget_failed(inode);
 612		return ERR_PTR(err);
 613	}
 614	unlock_new_inode(inode);
 615	return inode;
 616}
 617
 618void nilfs_write_inode_common(struct inode *inode,
 619			      struct nilfs_inode *raw_inode, int has_bmap)
 620{
 621	struct nilfs_inode_info *ii = NILFS_I(inode);
 622
 623	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 624	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
 625	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
 626	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 627	raw_inode->i_size = cpu_to_le64(inode->i_size);
 628	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 629	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
 630	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 631	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 632	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
 633
 634	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
 635	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 636
 637	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
 638		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 639
 640		/* zero-fill unused portion in the case of super root block */
 641		raw_inode->i_xattr = 0;
 642		raw_inode->i_pad = 0;
 643		memset((void *)raw_inode + sizeof(*raw_inode), 0,
 644		       nilfs->ns_inode_size - sizeof(*raw_inode));
 645	}
 646
 647	if (has_bmap)
 648		nilfs_bmap_write(ii->i_bmap, raw_inode);
 649	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 650		raw_inode->i_device_code =
 651			cpu_to_le64(huge_encode_dev(inode->i_rdev));
 652	/*
 653	 * When extending inode, nilfs->ns_inode_size should be checked
 654	 * for substitutions of appended fields.
 655	 */
 656}
 657
 658void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
 659{
 660	ino_t ino = inode->i_ino;
 661	struct nilfs_inode_info *ii = NILFS_I(inode);
 662	struct inode *ifile = ii->i_root->ifile;
 663	struct nilfs_inode *raw_inode;
 664
 665	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
 666
 667	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 668		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
 669	if (flags & I_DIRTY_DATASYNC)
 670		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 671
 672	nilfs_write_inode_common(inode, raw_inode, 0);
 673		/*
 674		 * XXX: call with has_bmap = 0 is a workaround to avoid
 675		 * deadlock of bmap.  This delays update of i_bmap to just
 676		 * before writing.
 677		 */
 678
 679	nilfs_ifile_unmap_inode(ifile, ino, ibh);
 680}
 681
 682#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
 683
 684static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
 685				unsigned long from)
 686{
 687	__u64 b;
 688	int ret;
 689
 690	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 691		return;
 692repeat:
 693	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 694	if (ret == -ENOENT)
 695		return;
 696	else if (ret < 0)
 697		goto failed;
 698
 699	if (b < from)
 700		return;
 701
 702	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
 703	ret = nilfs_bmap_truncate(ii->i_bmap, b);
 704	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
 705	if (!ret || (ret == -ENOMEM &&
 706		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 707		goto repeat;
 708
 709failed:
 710	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
 711		   ret, ii->vfs_inode.i_ino);
 712}
 713
 714void nilfs_truncate(struct inode *inode)
 715{
 716	unsigned long blkoff;
 717	unsigned int blocksize;
 718	struct nilfs_transaction_info ti;
 719	struct super_block *sb = inode->i_sb;
 720	struct nilfs_inode_info *ii = NILFS_I(inode);
 721
 722	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 723		return;
 724	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 725		return;
 726
 727	blocksize = sb->s_blocksize;
 728	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
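     	/*
     	 * blkoff is i_size rounded up to a block boundary, in blocks;
     	 * e.g. with a 4096-byte block size, i_size = 10000 gives
     	 * blkoff = 3, so blocks 3 and beyond are released below.
     	 */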
 729	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 730
 731	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 732
 733	nilfs_truncate_bmap(ii, blkoff);
 734
 735	inode->i_mtime = inode->i_ctime = current_time(inode);
 736	if (IS_SYNC(inode))
 737		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 738
 739	nilfs_mark_inode_dirty(inode);
 740	nilfs_set_file_dirty(inode, 0);
 741	nilfs_transaction_commit(sb);
 742	/*
 743	 * May construct a logical segment and may fail in sync mode.
 744	 * But truncate has no return value.
 745	 */
 746}
 747
 748static void nilfs_clear_inode(struct inode *inode)
 749{
 750	struct nilfs_inode_info *ii = NILFS_I(inode);
 751
 752	/*
 753	 * Free resources allocated in nilfs_read_inode(), here.
  754	 * Free resources allocated in nilfs_read_inode() here.
 755	BUG_ON(!list_empty(&ii->i_dirty));
 756	brelse(ii->i_bh);
 757	ii->i_bh = NULL;
 758
 759	if (nilfs_is_metadata_file_inode(inode))
 760		nilfs_mdt_clear(inode);
 761
 762	if (test_bit(NILFS_I_BMAP, &ii->i_state))
 763		nilfs_bmap_clear(ii->i_bmap);
 764
 765	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 766
 767	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
 768		nilfs_put_root(ii->i_root);
 769}
 770
 771void nilfs_evict_inode(struct inode *inode)
 772{
 773	struct nilfs_transaction_info ti;
 774	struct super_block *sb = inode->i_sb;
 775	struct nilfs_inode_info *ii = NILFS_I(inode);
 776	int ret;
 777
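     	/*
     	 * An inode that is still linked, has no root object attached, or
     	 * is marked bad needs no on-disk deletion: just invalidate its
     	 * page cache and in-core state and return.
     	 */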
 778	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 779		truncate_inode_pages_final(&inode->i_data);
 780		clear_inode(inode);
 781		nilfs_clear_inode(inode);
 782		return;
 783	}
 784	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 785
 786	truncate_inode_pages_final(&inode->i_data);
 787
 788	/* TODO: some of the following operations may fail.  */
 789	nilfs_truncate_bmap(ii, 0);
 790	nilfs_mark_inode_dirty(inode);
 791	clear_inode(inode);
 792
 793	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 794	if (!ret)
 795		atomic64_dec(&ii->i_root->inodes_count);
 796
 797	nilfs_clear_inode(inode);
 798
 799	if (IS_SYNC(inode))
 800		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 801	nilfs_transaction_commit(sb);
 802	/*
 803	 * May construct a logical segment and may fail in sync mode.
 804	 * But delete_inode has no return value.
 805	 */
 806}
 807
 808int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
 809{
 810	struct nilfs_transaction_info ti;
 811	struct inode *inode = d_inode(dentry);
 812	struct super_block *sb = inode->i_sb;
 813	int err;
 814
 815	err = setattr_prepare(dentry, iattr);
 816	if (err)
 817		return err;
 818
 819	err = nilfs_transaction_begin(sb, &ti, 0);
 820	if (unlikely(err))
 821		return err;
 822
 823	if ((iattr->ia_valid & ATTR_SIZE) &&
 824	    iattr->ia_size != i_size_read(inode)) {
 825		inode_dio_wait(inode);
 826		truncate_setsize(inode, iattr->ia_size);
 827		nilfs_truncate(inode);
 828	}
 829
 830	setattr_copy(inode, iattr);
 831	mark_inode_dirty(inode);
 832
 833	if (iattr->ia_valid & ATTR_MODE) {
 834		err = nilfs_acl_chmod(inode);
 835		if (unlikely(err))
 836			goto out_err;
 837	}
 838
 839	return nilfs_transaction_commit(sb);
 840
 841out_err:
 842	nilfs_transaction_abort(sb);
 843	return err;
 844}
 845
 846int nilfs_permission(struct inode *inode, int mask)
 847{
 848	struct nilfs_root *root = NILFS_I(inode)->i_root;
 849
 850	if ((mask & MAY_WRITE) && root &&
 851	    root->cno != NILFS_CPTREE_CURRENT_CNO)
 852		return -EROFS; /* snapshot is not writable */
 853
 854	return generic_permission(inode, mask);
 855}
 856
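     /*
      * nilfs_load_inode_block() caches the buffer head of the on-disk inode
      * in ii->i_bh and returns it with an extra reference.  ns_inode_lock is
      * dropped while the block is read in (which may sleep), and the cache
      * is re-checked afterwards so that a buffer installed concurrently by
      * another task is reused instead.
      */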
 857int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 858{
 859	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 860	struct nilfs_inode_info *ii = NILFS_I(inode);
 861	int err;
 862
 863	spin_lock(&nilfs->ns_inode_lock);
 864	if (ii->i_bh == NULL) {
 865		spin_unlock(&nilfs->ns_inode_lock);
 866		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
 867						  inode->i_ino, pbh);
 868		if (unlikely(err))
 869			return err;
 870		spin_lock(&nilfs->ns_inode_lock);
 871		if (ii->i_bh == NULL)
 872			ii->i_bh = *pbh;
 873		else {
 874			brelse(*pbh);
 875			*pbh = ii->i_bh;
 876		}
 877	} else
 878		*pbh = ii->i_bh;
 879
 880	get_bh(*pbh);
 881	spin_unlock(&nilfs->ns_inode_lock);
 882	return 0;
 883}
 884
 885int nilfs_inode_dirty(struct inode *inode)
 886{
 887	struct nilfs_inode_info *ii = NILFS_I(inode);
 888	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 889	int ret = 0;
 890
 891	if (!list_empty(&ii->i_dirty)) {
 892		spin_lock(&nilfs->ns_inode_lock);
 893		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
 894			test_bit(NILFS_I_BUSY, &ii->i_state);
 895		spin_unlock(&nilfs->ns_inode_lock);
 896	}
 897	return ret;
 898}
 899
 900int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 901{
 902	struct nilfs_inode_info *ii = NILFS_I(inode);
 903	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 904
 905	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
 906
 907	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
 908		return 0;
 909
 910	spin_lock(&nilfs->ns_inode_lock);
 911	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 912	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
 913		/*
 914		 * Because this routine may race with nilfs_dispose_list(),
 915		 * we have to check NILFS_I_QUEUED here, too.
 916		 */
 917		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
 918			/*
 919			 * This will happen when somebody is freeing
 920			 * this inode.
 921			 */
 922			nilfs_warn(inode->i_sb,
 923				   "cannot set file dirty (ino=%lu): the file is being freed",
 924				   inode->i_ino);
 925			spin_unlock(&nilfs->ns_inode_lock);
 926			return -EINVAL; /*
  927					 * NILFS_I_DIRTY may remain set
  928					 * while the inode is being freed.
 929					 */
 930		}
 931		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 932		set_bit(NILFS_I_QUEUED, &ii->i_state);
 933	}
 934	spin_unlock(&nilfs->ns_inode_lock);
 935	return 0;
 936}
 937
 938int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 939{
 940	struct buffer_head *ibh;
 941	int err;
 942
 943	err = nilfs_load_inode_block(inode, &ibh);
 944	if (unlikely(err)) {
 945		nilfs_warn(inode->i_sb,
 946			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
 947			   inode->i_ino, err);
 948		return err;
 949	}
 950	nilfs_update_inode(inode, ibh, flags);
 951	mark_buffer_dirty(ibh);
 952	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
 953	brelse(ibh);
 954	return 0;
 955}
 956
 957/**
 958 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
  959 * @inode: inode of the file to be registered.
  960 * @flags: flags to determine the dirty state of the inode
  961 *
  962 * nilfs_dirty_inode() loads an inode block containing @inode and copies
  963 * the in-memory inode data into its entry in that block.  The operation
  964 * is excluded from the segment construction and can be called both as
  965 * a single operation and as a part of indivisible file operations.
 966 */
 967void nilfs_dirty_inode(struct inode *inode, int flags)
 968{
 969	struct nilfs_transaction_info ti;
 970	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 971
 972	if (is_bad_inode(inode)) {
 973		nilfs_warn(inode->i_sb,
 974			   "tried to mark bad_inode dirty. ignored.");
 975		dump_stack();
 976		return;
 977	}
 978	if (mdi) {
 979		nilfs_mdt_mark_dirty(inode);
 980		return;
 981	}
 982	nilfs_transaction_begin(inode->i_sb, &ti, 0);
 983	__nilfs_mark_inode_dirty(inode, flags);
 984	nilfs_transaction_commit(inode->i_sb); /* never fails */
 985}
 986
 987int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 988		 __u64 start, __u64 len)
 989{
 990	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 991	__u64 logical = 0, phys = 0, size = 0;
 992	__u32 flags = 0;
 993	loff_t isize;
 994	sector_t blkoff, end_blkoff;
 995	sector_t delalloc_blkoff;
 996	unsigned long delalloc_blklen;
 997	unsigned int blkbits = inode->i_blkbits;
 998	int ret, n;
 999
1000	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
1001	if (ret)
1002		return ret;
1003
1004	inode_lock(inode);
1005
1006	isize = i_size_read(inode);
1007
1008	blkoff = start >> blkbits;
1009	end_blkoff = (start + len - 1) >> blkbits;
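     	/*
     	 * Convert the byte range to an inclusive block range; e.g. with
     	 * 4 KiB blocks (blkbits = 12), start = 4096 and len = 8192 map
     	 * to blkoff = 1 and end_blkoff = 2.
     	 */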
1010
1011	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1012							&delalloc_blkoff);
1013
1014	do {
1015		__u64 blkphy;
1016		unsigned int maxblocks;
1017
1018		if (delalloc_blklen && blkoff == delalloc_blkoff) {
1019			if (size) {
1020				/* End of the current extent */
1021				ret = fiemap_fill_next_extent(
1022					fieinfo, logical, phys, size, flags);
1023				if (ret)
1024					break;
1025			}
1026			if (blkoff > end_blkoff)
1027				break;
1028
1029			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1030			logical = blkoff << blkbits;
1031			phys = 0;
1032			size = delalloc_blklen << blkbits;
1033
1034			blkoff = delalloc_blkoff + delalloc_blklen;
1035			delalloc_blklen = nilfs_find_uncommitted_extent(
1036				inode, blkoff, &delalloc_blkoff);
1037			continue;
1038		}
1039
1040		/*
1041		 * Limit the number of blocks that we look up so as
1042		 * not to get into the next delayed allocation extent.
1043		 */
1044		maxblocks = INT_MAX;
1045		if (delalloc_blklen)
1046			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1047					  maxblocks);
1048		blkphy = 0;
1049
1050		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1051		n = nilfs_bmap_lookup_contig(
1052			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1053		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1054
1055		if (n < 0) {
1056			int past_eof;
1057
1058			if (unlikely(n != -ENOENT))
1059				break; /* error */
1060
1061			/* HOLE */
1062			blkoff++;
1063			past_eof = ((blkoff << blkbits) >= isize);
1064
1065			if (size) {
1066				/* End of the current extent */
1067
1068				if (past_eof)
1069					flags |= FIEMAP_EXTENT_LAST;
1070
1071				ret = fiemap_fill_next_extent(
1072					fieinfo, logical, phys, size, flags);
1073				if (ret)
1074					break;
1075				size = 0;
1076			}
1077			if (blkoff > end_blkoff || past_eof)
1078				break;
1079		} else {
1080			if (size) {
1081				if (phys && blkphy << blkbits == phys + size) {
1082					/* The current extent goes on */
1083					size += n << blkbits;
1084				} else {
1085					/* Terminate the current extent */
1086					ret = fiemap_fill_next_extent(
1087						fieinfo, logical, phys, size,
1088						flags);
1089					if (ret || blkoff > end_blkoff)
1090						break;
1091
1092					/* Start another extent */
1093					flags = FIEMAP_EXTENT_MERGED;
1094					logical = blkoff << blkbits;
1095					phys = blkphy << blkbits;
1096					size = n << blkbits;
1097				}
1098			} else {
1099				/* Start a new extent */
1100				flags = FIEMAP_EXTENT_MERGED;
1101				logical = blkoff << blkbits;
1102				phys = blkphy << blkbits;
1103				size = n << blkbits;
1104			}
1105			blkoff += n;
1106		}
1107		cond_resched();
1108	} while (true);
1109
1110	/* If ret is 1 then we just hit the end of the extent array */
1111	if (ret == 1)
1112		ret = 0;
1113
1114	inode_unlock(inode);
1115	return ret;
1116}