v4.17
 
   1/*
   2 * inode.c - NILFS inode operations.
   3 *
   4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * Written by Ryusuke Konishi.
  17 *
  18 */
  19
  20#include <linux/buffer_head.h>
  21#include <linux/gfp.h>
  22#include <linux/mpage.h>
  23#include <linux/pagemap.h>
  24#include <linux/writeback.h>
  25#include <linux/uio.h>
  26#include "nilfs.h"
  27#include "btnode.h"
  28#include "segment.h"
  29#include "page.h"
  30#include "mdt.h"
  31#include "cpfile.h"
  32#include "ifile.h"
  33
  34/**
  35 * struct nilfs_iget_args - arguments used during comparison between inodes
  36 * @ino: inode number
  37 * @cno: checkpoint number
   38 * @root: pointer to the NILFS root object (mounted checkpoint)
  39 * @for_gc: inode for GC flag
  40 */
  41struct nilfs_iget_args {
  42	u64 ino;
  43	__u64 cno;
  44	struct nilfs_root *root;
  45	int for_gc;
  46};
  47
  48static int nilfs_iget_test(struct inode *inode, void *opaque);
  49
  50void nilfs_inode_add_blocks(struct inode *inode, int n)
  51{
  52	struct nilfs_root *root = NILFS_I(inode)->i_root;
  53
  54	inode_add_bytes(inode, i_blocksize(inode) * n);
  55	if (root)
  56		atomic64_add(n, &root->blocks_count);
  57}
  58
  59void nilfs_inode_sub_blocks(struct inode *inode, int n)
  60{
  61	struct nilfs_root *root = NILFS_I(inode)->i_root;
  62
  63	inode_sub_bytes(inode, i_blocksize(inode) * n);
  64	if (root)
  65		atomic64_sub(n, &root->blocks_count);
  66}
  67
  68/**
  69 * nilfs_get_block() - get a file block on the filesystem (callback function)
  70 * @inode - inode struct of the target file
  71 * @blkoff - file block number
  72 * @bh_result - buffer head to be mapped on
   73 * @create - indicate whether to allocate the block if it has not been
   74 *      allocated yet.
   75 *
   76 * This function does not issue an actual read request for the specified
   77 * data block; that is done by the VFS.
  78 */
  79int nilfs_get_block(struct inode *inode, sector_t blkoff,
  80		    struct buffer_head *bh_result, int create)
  81{
  82	struct nilfs_inode_info *ii = NILFS_I(inode);
  83	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
  84	__u64 blknum = 0;
  85	int err = 0, ret;
  86	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
  87
  88	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  89	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
  90	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  91	if (ret >= 0) {	/* found */
  92		map_bh(bh_result, inode->i_sb, blknum);
  93		if (ret > 0)
  94			bh_result->b_size = (ret << inode->i_blkbits);
  95		goto out;
  96	}
  97	/* data block was not found */
  98	if (ret == -ENOENT && create) {
  99		struct nilfs_transaction_info ti;
 100
 101		bh_result->b_blocknr = 0;
 102		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
 103		if (unlikely(err))
 104			goto out;
 105		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
 106					(unsigned long)bh_result);
 107		if (unlikely(err != 0)) {
 108			if (err == -EEXIST) {
 109				/*
 110				 * The get_block() function could be called
 111				 * from multiple callers for an inode.
 112				 * However, the page having this block must
 113				 * be locked in this case.
 114				 */
 115				nilfs_msg(inode->i_sb, KERN_WARNING,
 116					  "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
 117					  __func__, inode->i_ino,
 118					  (unsigned long long)blkoff);
 119				err = 0;
 120			}
 121			nilfs_transaction_abort(inode->i_sb);
 122			goto out;
 123		}
 124		nilfs_mark_inode_dirty_sync(inode);
 125		nilfs_transaction_commit(inode->i_sb); /* never fails */
 126		/* Error handling should be detailed */
 127		set_buffer_new(bh_result);
 128		set_buffer_delay(bh_result);
 129		map_bh(bh_result, inode->i_sb, 0);
 130		/* Disk block number must be changed to proper value */
 131
 132	} else if (ret == -ENOENT) {
 133		/*
  134		 * a missing block is not an error (e.g. a hole); return
  135		 * without the mapped state flag.
 136		 */
 137		;
 138	} else {
 139		err = ret;
 140	}
 141
 142 out:
 143	return err;
 144}
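
How this callback is driven, as a minimal illustrative sketch (not code from this file): generic helpers in mpage.c and buffer.c receive nilfs_get_block as a get_block_t hook, size the request through bh_result->b_size, and read the resulting mapping back out of the buffer head. Assumes inode and blkoff are in scope.

	/*
	 * Sketch of the get_block_t contract for a read (create == 0).
	 * Illustrative only; the real callers live in fs/mpage.c and
	 * fs/buffer.c and use an on-stack buffer head much like this.
	 */
	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
	int err = nilfs_get_block(inode, blkoff, &bh, 0);

	if (!err && buffer_mapped(&bh)) {
		/* bh.b_blocknr holds the disk block; bh.b_size the run length */
	} else if (!err) {
		/* unmapped: a hole -- the caller zero-fills the page instead */
	}
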
 145
 146/**
 147 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 148 * address_space_operations.
 149 * @file - file struct of the file to be read
 150 * @page - the page to be read
 151 */
 152static int nilfs_readpage(struct file *file, struct page *page)
 153{
 154	return mpage_readpage(page, nilfs_get_block);
 155}
 156
 157/**
 158 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 159 * address_space_operations.
 160 * @file - file struct of the file to be read
 161 * @mapping - address_space struct used for reading multiple pages
 162 * @pages - the pages to be read
 163 * @nr_pages - number of pages to be read
 164 */
 165static int nilfs_readpages(struct file *file, struct address_space *mapping,
 166			   struct list_head *pages, unsigned int nr_pages)
 167{
 168	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
 169}
 170
 171static int nilfs_writepages(struct address_space *mapping,
 172			    struct writeback_control *wbc)
 173{
 174	struct inode *inode = mapping->host;
 175	int err = 0;
 176
 177	if (sb_rdonly(inode->i_sb)) {
 178		nilfs_clear_dirty_pages(mapping, false);
 179		return -EROFS;
 180	}
 181
 182	if (wbc->sync_mode == WB_SYNC_ALL)
 183		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 184						    wbc->range_start,
 185						    wbc->range_end);
 186	return err;
 187}
 188
 189static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 190{
 191	struct inode *inode = page->mapping->host;
 192	int err;
 193
 194	if (sb_rdonly(inode->i_sb)) {
 195		/*
  196		 * This means the filesystem was remounted read-only
  197		 * because of an error or metadata corruption, but dirty
  198		 * pages are still waiting to be flushed in the background.
  199		 * So we simply discard this dirty page here.
 200		 */
 201		nilfs_clear_dirty_page(page, false);
 202		unlock_page(page);
 203		return -EROFS;
 204	}
 205
 206	redirty_page_for_writepage(wbc, page);
 207	unlock_page(page);
 208
 209	if (wbc->sync_mode == WB_SYNC_ALL) {
 210		err = nilfs_construct_segment(inode->i_sb);
 211		if (unlikely(err))
 212			return err;
 213	} else if (wbc->for_reclaim)
 214		nilfs_flush_segment(inode->i_sb, inode->i_ino);
 215
 216	return 0;
 217}
 218
 219static int nilfs_set_page_dirty(struct page *page)
 220{
 221	struct inode *inode = page->mapping->host;
 222	int ret = __set_page_dirty_nobuffers(page);
 223
 224	if (page_has_buffers(page)) {
 225		unsigned int nr_dirty = 0;
 226		struct buffer_head *bh, *head;
 227
 228		/*
 229		 * This page is locked by callers, and no other thread
 230		 * concurrently marks its buffers dirty since they are
  231		 * only dirtied through routines in fs/buffer.c, where
  232		 * the call sites of mark_buffer_dirty() are protected
  233		 * by the page lock.
 234		 */
 235		bh = head = page_buffers(page);
 236		do {
 237			/* Do not mark hole blocks dirty */
 238			if (buffer_dirty(bh) || !buffer_mapped(bh))
 239				continue;
 240
 241			set_buffer_dirty(bh);
 242			nr_dirty++;
 243		} while (bh = bh->b_this_page, bh != head);
 244
 245		if (nr_dirty)
 246			nilfs_set_file_dirty(inode, nr_dirty);
 247	} else if (ret) {
 248		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 249
 250		nilfs_set_file_dirty(inode, nr_dirty);
 251	}
 252	return ret;
 253}
 254
 255void nilfs_write_failed(struct address_space *mapping, loff_t to)
 256{
 257	struct inode *inode = mapping->host;
 258
 259	if (to > inode->i_size) {
 260		truncate_pagecache(inode, inode->i_size);
 261		nilfs_truncate(inode);
 262	}
 263}
 264
 265static int nilfs_write_begin(struct file *file, struct address_space *mapping,
 266			     loff_t pos, unsigned len, unsigned flags,
 267			     struct page **pagep, void **fsdata)
 268
 269{
 270	struct inode *inode = mapping->host;
 271	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
 272
 273	if (unlikely(err))
 274		return err;
 275
 276	err = block_write_begin(mapping, pos, len, flags, pagep,
 277				nilfs_get_block);
 278	if (unlikely(err)) {
 279		nilfs_write_failed(mapping, pos + len);
 280		nilfs_transaction_abort(inode->i_sb);
 281	}
 282	return err;
 283}
 284
 285static int nilfs_write_end(struct file *file, struct address_space *mapping,
 286			   loff_t pos, unsigned len, unsigned copied,
 287			   struct page *page, void *fsdata)
 288{
 289	struct inode *inode = mapping->host;
 290	unsigned int start = pos & (PAGE_SIZE - 1);
 291	unsigned int nr_dirty;
 292	int err;
 293
 294	nr_dirty = nilfs_page_count_clean_buffers(page, start,
 295						  start + copied);
 296	copied = generic_write_end(file, mapping, pos, len, copied, page,
 297				   fsdata);
 298	nilfs_set_file_dirty(inode, nr_dirty);
 299	err = nilfs_transaction_commit(inode->i_sb);
 300	return err ? : copied;
 301}
 302
 303static ssize_t
 304nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 305{
 306	struct inode *inode = file_inode(iocb->ki_filp);
 307
 308	if (iov_iter_rw(iter) == WRITE)
 309		return 0;
 310
 311	/* Needs synchronization with the cleaner */
 312	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
 313}
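
A note on the early return above: a direct_IO method that returns 0 for a write makes generic_file_write_iter() fall back to buffered I/O, so on nilfs2 only O_DIRECT reads actually bypass the page cache. A hedged userspace illustration (aligned_buf is a hypothetical suitably aligned buffer, as O_DIRECT requires):

	#include <fcntl.h>
	#include <unistd.h>

	/* Illustrative only: O_DIRECT writes quietly take the buffered path */
	int fd = open("/mnt/nilfs/file", O_WRONLY | O_DIRECT, 0644);
	ssize_t n = write(fd, aligned_buf, 4096);  /* buffered despite O_DIRECT */
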
 314
 315const struct address_space_operations nilfs_aops = {
 316	.writepage		= nilfs_writepage,
 317	.readpage		= nilfs_readpage,
 318	.writepages		= nilfs_writepages,
 319	.set_page_dirty		= nilfs_set_page_dirty,
 320	.readpages		= nilfs_readpages,
 321	.write_begin		= nilfs_write_begin,
 322	.write_end		= nilfs_write_end,
 323	/* .releasepage		= nilfs_releasepage, */
 324	.invalidatepage		= block_invalidatepage,
 325	.direct_IO		= nilfs_direct_IO,
 326	.is_partially_uptodate  = block_is_partially_uptodate,
 327};
 328
 329static int nilfs_insert_inode_locked(struct inode *inode,
 330				     struct nilfs_root *root,
 331				     unsigned long ino)
 332{
 333	struct nilfs_iget_args args = {
 334		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 335	};
 336
 337	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
 338}
 339
 340struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 341{
 342	struct super_block *sb = dir->i_sb;
 343	struct the_nilfs *nilfs = sb->s_fs_info;
 344	struct inode *inode;
 345	struct nilfs_inode_info *ii;
 346	struct nilfs_root *root;
 347	int err = -ENOMEM;
 348	ino_t ino;
 349
 350	inode = new_inode(sb);
 351	if (unlikely(!inode))
 352		goto failed;
 353
 354	mapping_set_gfp_mask(inode->i_mapping,
 355			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 356
 357	root = NILFS_I(dir)->i_root;
 358	ii = NILFS_I(inode);
 359	ii->i_state = BIT(NILFS_I_NEW);
 360	ii->i_root = root;
 361
 362	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 363	if (unlikely(err))
 364		goto failed_ifile_create_inode;
 365	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
 366
 367	atomic64_inc(&root->inodes_count);
 368	inode_init_owner(inode, dir, mode);
 369	inode->i_ino = ino;
 370	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 371
 372	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 373		err = nilfs_bmap_read(ii->i_bmap, NULL);
 374		if (err < 0)
 375			goto failed_after_creation;
 376
 377		set_bit(NILFS_I_BMAP, &ii->i_state);
 378		/* No lock is needed; iget() ensures it. */
 379	}
 380
 381	ii->i_flags = nilfs_mask_flags(
 382		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
 383
 384	/* ii->i_file_acl = 0; */
 385	/* ii->i_dir_acl = 0; */
 386	ii->i_dir_start_lookup = 0;
 387	nilfs_set_inode_flags(inode);
 388	spin_lock(&nilfs->ns_next_gen_lock);
 389	inode->i_generation = nilfs->ns_next_generation++;
 390	spin_unlock(&nilfs->ns_next_gen_lock);
 391	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
 392		err = -EIO;
 393		goto failed_after_creation;
 394	}
 395
 396	err = nilfs_init_acl(inode, dir);
 397	if (unlikely(err))
 398		/*
  399		 * This should never occur.  When supporting nilfs_init_acl(),
  400		 * proper cancellation of the above jobs should be considered.
 401		 */
 402		goto failed_after_creation;
 403
 404	return inode;
 405
 406 failed_after_creation:
 407	clear_nlink(inode);
 408	unlock_new_inode(inode);
 409	iput(inode);  /*
 410		       * raw_inode will be deleted through
 411		       * nilfs_evict_inode().
 412		       */
 413	goto failed;
 414
 415 failed_ifile_create_inode:
 416	make_bad_inode(inode);
 417	iput(inode);
 418 failed:
 419	return ERR_PTR(err);
 420}
 421
 422void nilfs_set_inode_flags(struct inode *inode)
 423{
 424	unsigned int flags = NILFS_I(inode)->i_flags;
 425	unsigned int new_fl = 0;
 426
 427	if (flags & FS_SYNC_FL)
 428		new_fl |= S_SYNC;
 429	if (flags & FS_APPEND_FL)
 430		new_fl |= S_APPEND;
 431	if (flags & FS_IMMUTABLE_FL)
 432		new_fl |= S_IMMUTABLE;
 433	if (flags & FS_NOATIME_FL)
 434		new_fl |= S_NOATIME;
 435	if (flags & FS_DIRSYNC_FL)
 436		new_fl |= S_DIRSYNC;
 437	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
 438			S_NOATIME | S_DIRSYNC);
 439}
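
The FS_*_FL bits translated above are the same flags userspace manipulates through chattr(1). A brief sketch (illustrative, with a hypothetical open fd) of the ioctl that ultimately feeds this translation:

	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int attr = FS_NOATIME_FL;            /* persisted in the on-disk i_flags... */
	ioctl(fd, FS_IOC_SETFLAGS, &attr);   /* ...then mirrored to S_NOATIME here */
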
 440
 441int nilfs_read_inode_common(struct inode *inode,
 442			    struct nilfs_inode *raw_inode)
 443{
 444	struct nilfs_inode_info *ii = NILFS_I(inode);
 445	int err;
 446
 447	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 448	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
 449	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
 450	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 451	inode->i_size = le64_to_cpu(raw_inode->i_size);
 452	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 453	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
 454	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 455	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 456	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 457	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 458	if (inode->i_nlink == 0)
 459		return -ESTALE; /* this inode is deleted */
 460
 461	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
 462	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
 463#if 0
 464	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
 465	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
 466		0 : le32_to_cpu(raw_inode->i_dir_acl);
 467#endif
 468	ii->i_dir_start_lookup = 0;
 469	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 470
 471	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 472	    S_ISLNK(inode->i_mode)) {
 473		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
 474		if (err < 0)
 475			return err;
 476		set_bit(NILFS_I_BMAP, &ii->i_state);
 477		/* No lock is needed; iget() ensures it. */
 478	}
 479	return 0;
 480}
 481
 482static int __nilfs_read_inode(struct super_block *sb,
 483			      struct nilfs_root *root, unsigned long ino,
 484			      struct inode *inode)
 485{
 486	struct the_nilfs *nilfs = sb->s_fs_info;
 487	struct buffer_head *bh;
 488	struct nilfs_inode *raw_inode;
 489	int err;
 490
 491	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 492	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 493	if (unlikely(err))
 494		goto bad_inode;
 495
 496	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
 497
 498	err = nilfs_read_inode_common(inode, raw_inode);
 499	if (err)
 500		goto failed_unmap;
 501
 502	if (S_ISREG(inode->i_mode)) {
 503		inode->i_op = &nilfs_file_inode_operations;
 504		inode->i_fop = &nilfs_file_operations;
 505		inode->i_mapping->a_ops = &nilfs_aops;
 506	} else if (S_ISDIR(inode->i_mode)) {
 507		inode->i_op = &nilfs_dir_inode_operations;
 508		inode->i_fop = &nilfs_dir_operations;
 509		inode->i_mapping->a_ops = &nilfs_aops;
 510	} else if (S_ISLNK(inode->i_mode)) {
 511		inode->i_op = &nilfs_symlink_inode_operations;
 512		inode_nohighmem(inode);
 513		inode->i_mapping->a_ops = &nilfs_aops;
 514	} else {
 515		inode->i_op = &nilfs_special_inode_operations;
 516		init_special_inode(
 517			inode, inode->i_mode,
 518			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
 519	}
 520	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 521	brelse(bh);
 522	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 523	nilfs_set_inode_flags(inode);
 524	mapping_set_gfp_mask(inode->i_mapping,
 525			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 526	return 0;
 527
 528 failed_unmap:
 529	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 530	brelse(bh);
 531
 532 bad_inode:
 533	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 534	return err;
 535}
 536
 537static int nilfs_iget_test(struct inode *inode, void *opaque)
 538{
 539	struct nilfs_iget_args *args = opaque;
 540	struct nilfs_inode_info *ii;
 541
 542	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
 543		return 0;
 544
 545	ii = NILFS_I(inode);
 546	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 547		return !args->for_gc;
 548
 549	return args->for_gc && args->cno == ii->i_cno;
 550}
 551
 552static int nilfs_iget_set(struct inode *inode, void *opaque)
 553{
 554	struct nilfs_iget_args *args = opaque;
 555
 556	inode->i_ino = args->ino;
 557	if (args->for_gc) {
 558		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
 559		NILFS_I(inode)->i_cno = args->cno;
 560		NILFS_I(inode)->i_root = NULL;
 561	} else {
 562		if (args->root && args->ino == NILFS_ROOT_INO)
 563			nilfs_get_root(args->root);
 564		NILFS_I(inode)->i_root = args->root;
 565	}
 566	return 0;
 567}
 568
 569struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 570			    unsigned long ino)
 571{
 572	struct nilfs_iget_args args = {
 573		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 574	};
 575
 576	return ilookup5(sb, ino, nilfs_iget_test, &args);
 577}
 578
 579struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 580				unsigned long ino)
 581{
 582	struct nilfs_iget_args args = {
 583		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 584	};
 585
 586	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 587}
 588
 589struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
 590			 unsigned long ino)
 591{
 592	struct inode *inode;
 593	int err;
 594
 595	inode = nilfs_iget_locked(sb, root, ino);
 596	if (unlikely(!inode))
 597		return ERR_PTR(-ENOMEM);
 598	if (!(inode->i_state & I_NEW))
 599		return inode;
 600
 601	err = __nilfs_read_inode(sb, root, ino, inode);
 602	if (unlikely(err)) {
 603		iget_failed(inode);
 604		return ERR_PTR(err);
 605	}
 606	unlock_new_inode(inode);
 607	return inode;
 608}
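
Typical call pattern for nilfs_iget(), as a short sketch (the real callers live in namei.c and super.c): an inode number is resolved within a checkpoint root, and failures arrive as ERR_PTR values rather than NULL.

	struct inode *inode = nilfs_iget(sb, root, ino);

	if (IS_ERR(inode))
		return PTR_ERR(inode);   /* e.g. -ESTALE for a deleted inode */
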
 609
 610struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 611				__u64 cno)
 612{
 613	struct nilfs_iget_args args = {
 614		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 615	};
 616	struct inode *inode;
 617	int err;
 618
 619	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 620	if (unlikely(!inode))
 621		return ERR_PTR(-ENOMEM);
 622	if (!(inode->i_state & I_NEW))
 623		return inode;
 624
 625	err = nilfs_init_gcinode(inode);
 626	if (unlikely(err)) {
 627		iget_failed(inode);
 628		return ERR_PTR(err);
 629	}
 630	unlock_new_inode(inode);
 631	return inode;
 632}
 633
 634void nilfs_write_inode_common(struct inode *inode,
 635			      struct nilfs_inode *raw_inode, int has_bmap)
 636{
 637	struct nilfs_inode_info *ii = NILFS_I(inode);
 638
 639	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 640	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
 641	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
 642	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 643	raw_inode->i_size = cpu_to_le64(inode->i_size);
 644	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 645	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
 646	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 647	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 648	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
 649
 650	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
 651	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 652
 653	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
 654		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 655
 656		/* zero-fill unused portion in the case of super root block */
 657		raw_inode->i_xattr = 0;
 658		raw_inode->i_pad = 0;
 659		memset((void *)raw_inode + sizeof(*raw_inode), 0,
 660		       nilfs->ns_inode_size - sizeof(*raw_inode));
 661	}
 662
 663	if (has_bmap)
 664		nilfs_bmap_write(ii->i_bmap, raw_inode);
 665	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 666		raw_inode->i_device_code =
 667			cpu_to_le64(huge_encode_dev(inode->i_rdev));
 668	/*
 669	 * When extending inode, nilfs->ns_inode_size should be checked
 670	 * for substitutions of appended fields.
 671	 */
 672}
 673
 674void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
 675{
 676	ino_t ino = inode->i_ino;
 677	struct nilfs_inode_info *ii = NILFS_I(inode);
 678	struct inode *ifile = ii->i_root->ifile;
 679	struct nilfs_inode *raw_inode;
 680
 681	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
 682
 683	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 684		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
 685	if (flags & I_DIRTY_DATASYNC)
 686		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 687
 688	nilfs_write_inode_common(inode, raw_inode, 0);
 689		/*
 690		 * XXX: call with has_bmap = 0 is a workaround to avoid
 691		 * deadlock of bmap.  This delays update of i_bmap to just
 692		 * before writing.
 693		 */
 694
 695	nilfs_ifile_unmap_inode(ifile, ino, ibh);
 696}
 697
 698#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
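
The figure in the comment checks out: with 4 KiB blocks, 16384 blocks * 4096 B/block = 67,108,864 B = 64 MiB. nilfs_truncate_bmap() below therefore trims a large file's bmap in bounded 64 MiB passes instead of one unbounded, long-running operation.
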
 699
 700static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
 701				unsigned long from)
 702{
 703	__u64 b;
 704	int ret;
 705
 706	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 707		return;
 708repeat:
 709	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 710	if (ret == -ENOENT)
 711		return;
 712	else if (ret < 0)
 713		goto failed;
 714
 715	if (b < from)
 716		return;
 717
 718	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
 719	ret = nilfs_bmap_truncate(ii->i_bmap, b);
 720	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
 721	if (!ret || (ret == -ENOMEM &&
 722		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 723		goto repeat;
 724
 725failed:
 726	nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
 727		  "error %d truncating bmap (ino=%lu)", ret,
 728		  ii->vfs_inode.i_ino);
 729}
 730
 731void nilfs_truncate(struct inode *inode)
 732{
 733	unsigned long blkoff;
 734	unsigned int blocksize;
 735	struct nilfs_transaction_info ti;
 736	struct super_block *sb = inode->i_sb;
 737	struct nilfs_inode_info *ii = NILFS_I(inode);
 738
 739	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 740		return;
 741	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 742		return;
 743
 744	blocksize = sb->s_blocksize;
 745	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
 746	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 747
 748	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 749
 750	nilfs_truncate_bmap(ii, blkoff);
 751
 752	inode->i_mtime = inode->i_ctime = current_time(inode);
 753	if (IS_SYNC(inode))
 754		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 755
 756	nilfs_mark_inode_dirty(inode);
 757	nilfs_set_file_dirty(inode, 0);
 758	nilfs_transaction_commit(sb);
 759	/*
 760	 * May construct a logical segment and may fail in sync mode.
 761	 * But truncate has no return value.
 762	 */
 763}
 764
 765static void nilfs_clear_inode(struct inode *inode)
 766{
 767	struct nilfs_inode_info *ii = NILFS_I(inode);
 768
 769	/*
 770	 * Free resources allocated in nilfs_read_inode(), here.
 771	 */
 772	BUG_ON(!list_empty(&ii->i_dirty));
 773	brelse(ii->i_bh);
 774	ii->i_bh = NULL;
 775
 776	if (nilfs_is_metadata_file_inode(inode))
 777		nilfs_mdt_clear(inode);
 778
 779	if (test_bit(NILFS_I_BMAP, &ii->i_state))
 780		nilfs_bmap_clear(ii->i_bmap);
 781
 782	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 783
 784	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
 785		nilfs_put_root(ii->i_root);
 786}
 787
 788void nilfs_evict_inode(struct inode *inode)
 789{
 790	struct nilfs_transaction_info ti;
 791	struct super_block *sb = inode->i_sb;
 792	struct nilfs_inode_info *ii = NILFS_I(inode);
 793	int ret;
 794
 795	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 796		truncate_inode_pages_final(&inode->i_data);
 797		clear_inode(inode);
 798		nilfs_clear_inode(inode);
 799		return;
 800	}
 801	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 802
 803	truncate_inode_pages_final(&inode->i_data);
 804
 805	/* TODO: some of the following operations may fail.  */
 806	nilfs_truncate_bmap(ii, 0);
 807	nilfs_mark_inode_dirty(inode);
 808	clear_inode(inode);
 809
 810	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 811	if (!ret)
 812		atomic64_dec(&ii->i_root->inodes_count);
 813
 814	nilfs_clear_inode(inode);
 815
 816	if (IS_SYNC(inode))
 817		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 818	nilfs_transaction_commit(sb);
 819	/*
 820	 * May construct a logical segment and may fail in sync mode.
 821	 * But delete_inode has no return value.
 822	 */
 823}
 824
 825int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
 826{
 827	struct nilfs_transaction_info ti;
 828	struct inode *inode = d_inode(dentry);
 829	struct super_block *sb = inode->i_sb;
 830	int err;
 831
 832	err = setattr_prepare(dentry, iattr);
 833	if (err)
 834		return err;
 835
 836	err = nilfs_transaction_begin(sb, &ti, 0);
 837	if (unlikely(err))
 838		return err;
 839
 840	if ((iattr->ia_valid & ATTR_SIZE) &&
 841	    iattr->ia_size != i_size_read(inode)) {
 842		inode_dio_wait(inode);
 843		truncate_setsize(inode, iattr->ia_size);
 844		nilfs_truncate(inode);
 845	}
 846
 847	setattr_copy(inode, iattr);
 848	mark_inode_dirty(inode);
 849
 850	if (iattr->ia_valid & ATTR_MODE) {
 851		err = nilfs_acl_chmod(inode);
 852		if (unlikely(err))
 853			goto out_err;
 854	}
 855
 856	return nilfs_transaction_commit(sb);
 857
 858out_err:
 859	nilfs_transaction_abort(sb);
 860	return err;
 861}
 862
 863int nilfs_permission(struct inode *inode, int mask)
 864{
 865	struct nilfs_root *root = NILFS_I(inode)->i_root;
 866
 867	if ((mask & MAY_WRITE) && root &&
 868	    root->cno != NILFS_CPTREE_CURRENT_CNO)
 869		return -EROFS; /* snapshot is not writable */
 870
 871	return generic_permission(inode, mask);
 872}
 873
 874int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 875{
 876	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 877	struct nilfs_inode_info *ii = NILFS_I(inode);
 878	int err;
 879
 880	spin_lock(&nilfs->ns_inode_lock);
 881	if (ii->i_bh == NULL) {
 882		spin_unlock(&nilfs->ns_inode_lock);
 883		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
 884						  inode->i_ino, pbh);
 885		if (unlikely(err))
 886			return err;
 887		spin_lock(&nilfs->ns_inode_lock);
 888		if (ii->i_bh == NULL)
 889			ii->i_bh = *pbh;
 890		else {
 891			brelse(*pbh);
 892			*pbh = ii->i_bh;
 893		}
 894	} else
 895		*pbh = ii->i_bh;
 896
 897	get_bh(*pbh);
 898	spin_unlock(&nilfs->ns_inode_lock);
 899	return 0;
 900}
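
The function above is a textbook check/unlock/allocate/recheck pattern: the blocking lookup cannot run under the spinlock, so after reacquiring it the winner of any race publishes its result and the loser discards its copy. A generic sketch of the shape (struct thing, expensive_lookup() and release() are hypothetical names):

	static DEFINE_SPINLOCK(lock);
	static struct thing *cached;             /* hypothetical cached object */
	struct thing *new, *ref;

	spin_lock(&lock);
	if (!cached) {
		spin_unlock(&lock);
		new = expensive_lookup();        /* may sleep; no lock held */
		spin_lock(&lock);
		if (!cached)
			cached = new;            /* we won the race: publish */
		else
			release(new);            /* we lost: drop our copy */
	}
	ref = cached;                            /* take the reference under lock */
	spin_unlock(&lock);
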
 901
 902int nilfs_inode_dirty(struct inode *inode)
 903{
 904	struct nilfs_inode_info *ii = NILFS_I(inode);
 905	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 906	int ret = 0;
 907
 908	if (!list_empty(&ii->i_dirty)) {
 909		spin_lock(&nilfs->ns_inode_lock);
 910		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
 911			test_bit(NILFS_I_BUSY, &ii->i_state);
 912		spin_unlock(&nilfs->ns_inode_lock);
 913	}
 914	return ret;
 915}
 916
 917int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 918{
 919	struct nilfs_inode_info *ii = NILFS_I(inode);
 920	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 921
 922	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
 923
 924	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
 925		return 0;
 926
 927	spin_lock(&nilfs->ns_inode_lock);
 928	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 929	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
 930		/*
 931		 * Because this routine may race with nilfs_dispose_list(),
 932		 * we have to check NILFS_I_QUEUED here, too.
 933		 */
 934		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
 935			/*
 936			 * This will happen when somebody is freeing
 937			 * this inode.
 938			 */
 939			nilfs_msg(inode->i_sb, KERN_WARNING,
 940				  "cannot set file dirty (ino=%lu): the file is being freed",
 941				  inode->i_ino);
 942			spin_unlock(&nilfs->ns_inode_lock);
 943			return -EINVAL; /*
 944					 * NILFS_I_DIRTY may remain for
 945					 * freeing inode.
 946					 */
 947		}
 948		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 949		set_bit(NILFS_I_QUEUED, &ii->i_state);
 950	}
 951	spin_unlock(&nilfs->ns_inode_lock);
 952	return 0;
 953}
 954
 955int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 956{
 957	struct buffer_head *ibh;
 958	int err;
 959
 960	err = nilfs_load_inode_block(inode, &ibh);
 961	if (unlikely(err)) {
 962		nilfs_msg(inode->i_sb, KERN_WARNING,
 963			  "cannot mark inode dirty (ino=%lu): error %d loading inode block",
 964			  inode->i_ino, err);
 965		return err;
 966	}
 967	nilfs_update_inode(inode, ibh, flags);
 968	mark_buffer_dirty(ibh);
 969	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
 970	brelse(ibh);
 971	return 0;
 972}
 973
 974/**
 975 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 976 * @inode: inode of the file to be registered.
 977 *
  978 * nilfs_dirty_inode() loads an inode block containing the specified
  979 * @inode and copies data from a nilfs_inode to the corresponding inode
  980 * entry in the inode block.  This operation is excluded from the segment
  981 * construction.  This function can be called both as a single operation
  982 * and as part of indivisible file operations.
 983 */
 984void nilfs_dirty_inode(struct inode *inode, int flags)
 985{
 986	struct nilfs_transaction_info ti;
 987	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 988
 989	if (is_bad_inode(inode)) {
 990		nilfs_msg(inode->i_sb, KERN_WARNING,
 991			  "tried to mark bad_inode dirty. ignored.");
 992		dump_stack();
 993		return;
 994	}
 995	if (mdi) {
 996		nilfs_mdt_mark_dirty(inode);
 997		return;
 998	}
 999	nilfs_transaction_begin(inode->i_sb, &ti, 0);
1000	__nilfs_mark_inode_dirty(inode, flags);
1001	nilfs_transaction_commit(inode->i_sb); /* never fails */
1002}
1003
1004int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1005		 __u64 start, __u64 len)
1006{
1007	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1008	__u64 logical = 0, phys = 0, size = 0;
1009	__u32 flags = 0;
1010	loff_t isize;
1011	sector_t blkoff, end_blkoff;
1012	sector_t delalloc_blkoff;
1013	unsigned long delalloc_blklen;
1014	unsigned int blkbits = inode->i_blkbits;
1015	int ret, n;
1016
1017	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
1018	if (ret)
1019		return ret;
1020
1021	inode_lock(inode);
1022
1023	isize = i_size_read(inode);
1024
1025	blkoff = start >> blkbits;
1026	end_blkoff = (start + len - 1) >> blkbits;
1027
1028	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1029							&delalloc_blkoff);
1030
1031	do {
1032		__u64 blkphy;
1033		unsigned int maxblocks;
1034
1035		if (delalloc_blklen && blkoff == delalloc_blkoff) {
1036			if (size) {
1037				/* End of the current extent */
1038				ret = fiemap_fill_next_extent(
1039					fieinfo, logical, phys, size, flags);
1040				if (ret)
1041					break;
1042			}
1043			if (blkoff > end_blkoff)
1044				break;
1045
1046			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1047			logical = blkoff << blkbits;
1048			phys = 0;
1049			size = delalloc_blklen << blkbits;
1050
1051			blkoff = delalloc_blkoff + delalloc_blklen;
1052			delalloc_blklen = nilfs_find_uncommitted_extent(
1053				inode, blkoff, &delalloc_blkoff);
1054			continue;
1055		}
1056
1057		/*
1058		 * Limit the number of blocks that we look up so as
1059		 * not to get into the next delayed allocation extent.
1060		 */
1061		maxblocks = INT_MAX;
1062		if (delalloc_blklen)
1063			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1064					  maxblocks);
1065		blkphy = 0;
1066
1067		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1068		n = nilfs_bmap_lookup_contig(
1069			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1070		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1071
1072		if (n < 0) {
1073			int past_eof;
1074
1075			if (unlikely(n != -ENOENT))
1076				break; /* error */
1077
1078			/* HOLE */
1079			blkoff++;
1080			past_eof = ((blkoff << blkbits) >= isize);
1081
1082			if (size) {
1083				/* End of the current extent */
1084
1085				if (past_eof)
1086					flags |= FIEMAP_EXTENT_LAST;
1087
1088				ret = fiemap_fill_next_extent(
1089					fieinfo, logical, phys, size, flags);
1090				if (ret)
1091					break;
1092				size = 0;
1093			}
1094			if (blkoff > end_blkoff || past_eof)
1095				break;
1096		} else {
1097			if (size) {
1098				if (phys && blkphy << blkbits == phys + size) {
1099					/* The current extent goes on */
1100					size += n << blkbits;
1101				} else {
1102					/* Terminate the current extent */
1103					ret = fiemap_fill_next_extent(
1104						fieinfo, logical, phys, size,
1105						flags);
1106					if (ret || blkoff > end_blkoff)
1107						break;
1108
1109					/* Start another extent */
1110					flags = FIEMAP_EXTENT_MERGED;
1111					logical = blkoff << blkbits;
1112					phys = blkphy << blkbits;
1113					size = n << blkbits;
1114				}
1115			} else {
1116				/* Start a new extent */
1117				flags = FIEMAP_EXTENT_MERGED;
1118				logical = blkoff << blkbits;
1119				phys = blkphy << blkbits;
1120				size = n << blkbits;
1121			}
1122			blkoff += n;
1123		}
1124		cond_resched();
1125	} while (true);
1126
1127	/* If ret is 1 then we just hit the end of the extent array */
1128	if (ret == 1)
1129		ret = 0;
1130
1131	inode_unlock(inode);
1132	return ret;
1133}
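
For reference, the userspace side of this function, as a minimal hedged sketch: the FS_IOC_FIEMAP ioctl (declared in <linux/fs.h>, structures in <linux/fiemap.h>) reaches nilfs_fiemap() through the VFS, and FIEMAP_FLAG_SYNC is the only flag it accepts, per the fiemap_check_flags() call above. Assumes fd is an open file descriptor; error handling abbreviated.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	char buf[sizeof(struct fiemap) + 16 * sizeof(struct fiemap_extent)];
	struct fiemap *fm = (struct fiemap *)buf;

	memset(buf, 0, sizeof(buf));
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;   /* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 16;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
		/* fm->fm_mapped_extents entries now sit in fm->fm_extents[] */
	}
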
v5.9
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c - NILFS inode operations.
   4 *
   5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
   6 *
   7 * Written by Ryusuke Konishi.
   8 *
   9 */
  10
  11#include <linux/buffer_head.h>
  12#include <linux/gfp.h>
  13#include <linux/mpage.h>
  14#include <linux/pagemap.h>
  15#include <linux/writeback.h>
  16#include <linux/uio.h>
  17#include <linux/fiemap.h>
  18#include "nilfs.h"
  19#include "btnode.h"
  20#include "segment.h"
  21#include "page.h"
  22#include "mdt.h"
  23#include "cpfile.h"
  24#include "ifile.h"
  25
  26/**
  27 * struct nilfs_iget_args - arguments used during comparison between inodes
  28 * @ino: inode number
  29 * @cno: checkpoint number
   30 * @root: pointer to the NILFS root object (mounted checkpoint)
  31 * @for_gc: inode for GC flag
  32 */
  33struct nilfs_iget_args {
  34	u64 ino;
  35	__u64 cno;
  36	struct nilfs_root *root;
  37	int for_gc;
  38};
  39
  40static int nilfs_iget_test(struct inode *inode, void *opaque);
  41
  42void nilfs_inode_add_blocks(struct inode *inode, int n)
  43{
  44	struct nilfs_root *root = NILFS_I(inode)->i_root;
  45
  46	inode_add_bytes(inode, i_blocksize(inode) * n);
  47	if (root)
  48		atomic64_add(n, &root->blocks_count);
  49}
  50
  51void nilfs_inode_sub_blocks(struct inode *inode, int n)
  52{
  53	struct nilfs_root *root = NILFS_I(inode)->i_root;
  54
  55	inode_sub_bytes(inode, i_blocksize(inode) * n);
  56	if (root)
  57		atomic64_sub(n, &root->blocks_count);
  58}
  59
  60/**
  61 * nilfs_get_block() - get a file block on the filesystem (callback function)
  62 * @inode - inode struct of the target file
  63 * @blkoff - file block number
  64 * @bh_result - buffer head to be mapped on
   65 * @create - indicate whether to allocate the block if it has not been
   66 *      allocated yet.
   67 *
   68 * This function does not issue an actual read request for the specified
   69 * data block; that is done by the VFS.
  70 */
  71int nilfs_get_block(struct inode *inode, sector_t blkoff,
  72		    struct buffer_head *bh_result, int create)
  73{
  74	struct nilfs_inode_info *ii = NILFS_I(inode);
  75	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
  76	__u64 blknum = 0;
  77	int err = 0, ret;
  78	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;
  79
  80	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  81	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
  82	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
  83	if (ret >= 0) {	/* found */
  84		map_bh(bh_result, inode->i_sb, blknum);
  85		if (ret > 0)
  86			bh_result->b_size = (ret << inode->i_blkbits);
  87		goto out;
  88	}
  89	/* data block was not found */
  90	if (ret == -ENOENT && create) {
  91		struct nilfs_transaction_info ti;
  92
  93		bh_result->b_blocknr = 0;
  94		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
  95		if (unlikely(err))
  96			goto out;
  97		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
  98					(unsigned long)bh_result);
  99		if (unlikely(err != 0)) {
 100			if (err == -EEXIST) {
 101				/*
 102				 * The get_block() function could be called
 103				 * from multiple callers for an inode.
 104				 * However, the page having this block must
 105				 * be locked in this case.
 106				 */
 107				nilfs_warn(inode->i_sb,
 108					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
 109					   __func__, inode->i_ino,
 110					   (unsigned long long)blkoff);
 111				err = 0;
 112			}
 113			nilfs_transaction_abort(inode->i_sb);
 114			goto out;
 115		}
 116		nilfs_mark_inode_dirty_sync(inode);
 117		nilfs_transaction_commit(inode->i_sb); /* never fails */
 118		/* Error handling should be detailed */
 119		set_buffer_new(bh_result);
 120		set_buffer_delay(bh_result);
 121		map_bh(bh_result, inode->i_sb, 0);
 122		/* Disk block number must be changed to proper value */
 123
 124	} else if (ret == -ENOENT) {
 125		/*
  126		 * a missing block is not an error (e.g. a hole); return
  127		 * without the mapped state flag.
 128		 */
 129		;
 130	} else {
 131		err = ret;
 132	}
 133
 134 out:
 135	return err;
 136}
 137
 138/**
 139 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 140 * address_space_operations.
 141 * @file - file struct of the file to be read
 142 * @page - the page to be read
 143 */
 144static int nilfs_readpage(struct file *file, struct page *page)
 145{
 146	return mpage_readpage(page, nilfs_get_block);
 147}
 148
 149static void nilfs_readahead(struct readahead_control *rac)
 150{
 151	mpage_readahead(rac, nilfs_get_block);
 152}
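
This is the main functional difference from the v4.17 listing above: around kernel 5.8 the readpages() address-space method was replaced by readahead(), and mpage_readpages() by mpage_readahead(), which takes a struct readahead_control instead of a list of pages. The corresponding one-line change in nilfs_aops:

	/* v4.17 */ .readpages = nilfs_readpages,   /* list_head of pages */
	/* v5.9  */ .readahead = nilfs_readahead,   /* struct readahead_control */
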
 153
 154static int nilfs_writepages(struct address_space *mapping,
 155			    struct writeback_control *wbc)
 156{
 157	struct inode *inode = mapping->host;
 158	int err = 0;
 159
 160	if (sb_rdonly(inode->i_sb)) {
 161		nilfs_clear_dirty_pages(mapping, false);
 162		return -EROFS;
 163	}
 164
 165	if (wbc->sync_mode == WB_SYNC_ALL)
 166		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 167						    wbc->range_start,
 168						    wbc->range_end);
 169	return err;
 170}
 171
 172static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 173{
 174	struct inode *inode = page->mapping->host;
 175	int err;
 176
 177	if (sb_rdonly(inode->i_sb)) {
 178		/*
  179		 * This means the filesystem was remounted read-only
  180		 * because of an error or metadata corruption, but dirty
  181		 * pages are still waiting to be flushed in the background.
  182		 * So we simply discard this dirty page here.
 183		 */
 184		nilfs_clear_dirty_page(page, false);
 185		unlock_page(page);
 186		return -EROFS;
 187	}
 188
 189	redirty_page_for_writepage(wbc, page);
 190	unlock_page(page);
 191
 192	if (wbc->sync_mode == WB_SYNC_ALL) {
 193		err = nilfs_construct_segment(inode->i_sb);
 194		if (unlikely(err))
 195			return err;
 196	} else if (wbc->for_reclaim)
 197		nilfs_flush_segment(inode->i_sb, inode->i_ino);
 198
 199	return 0;
 200}
 201
 202static int nilfs_set_page_dirty(struct page *page)
 203{
 204	struct inode *inode = page->mapping->host;
 205	int ret = __set_page_dirty_nobuffers(page);
 206
 207	if (page_has_buffers(page)) {
 208		unsigned int nr_dirty = 0;
 209		struct buffer_head *bh, *head;
 210
 211		/*
 212		 * This page is locked by callers, and no other thread
 213		 * concurrently marks its buffers dirty since they are
  214		 * only dirtied through routines in fs/buffer.c, where
  215		 * the call sites of mark_buffer_dirty() are protected
  216		 * by the page lock.
 217		 */
 218		bh = head = page_buffers(page);
 219		do {
 220			/* Do not mark hole blocks dirty */
 221			if (buffer_dirty(bh) || !buffer_mapped(bh))
 222				continue;
 223
 224			set_buffer_dirty(bh);
 225			nr_dirty++;
 226		} while (bh = bh->b_this_page, bh != head);
 227
 228		if (nr_dirty)
 229			nilfs_set_file_dirty(inode, nr_dirty);
 230	} else if (ret) {
 231		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 232
 233		nilfs_set_file_dirty(inode, nr_dirty);
 234	}
 235	return ret;
 236}
 237
 238void nilfs_write_failed(struct address_space *mapping, loff_t to)
 239{
 240	struct inode *inode = mapping->host;
 241
 242	if (to > inode->i_size) {
 243		truncate_pagecache(inode, inode->i_size);
 244		nilfs_truncate(inode);
 245	}
 246}
 247
 248static int nilfs_write_begin(struct file *file, struct address_space *mapping,
 249			     loff_t pos, unsigned len, unsigned flags,
 250			     struct page **pagep, void **fsdata)
 251
 252{
 253	struct inode *inode = mapping->host;
 254	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
 255
 256	if (unlikely(err))
 257		return err;
 258
 259	err = block_write_begin(mapping, pos, len, flags, pagep,
 260				nilfs_get_block);
 261	if (unlikely(err)) {
 262		nilfs_write_failed(mapping, pos + len);
 263		nilfs_transaction_abort(inode->i_sb);
 264	}
 265	return err;
 266}
 267
 268static int nilfs_write_end(struct file *file, struct address_space *mapping,
 269			   loff_t pos, unsigned len, unsigned copied,
 270			   struct page *page, void *fsdata)
 271{
 272	struct inode *inode = mapping->host;
 273	unsigned int start = pos & (PAGE_SIZE - 1);
 274	unsigned int nr_dirty;
 275	int err;
 276
 277	nr_dirty = nilfs_page_count_clean_buffers(page, start,
 278						  start + copied);
 279	copied = generic_write_end(file, mapping, pos, len, copied, page,
 280				   fsdata);
 281	nilfs_set_file_dirty(inode, nr_dirty);
 282	err = nilfs_transaction_commit(inode->i_sb);
 283	return err ? : copied;
 284}
 285
 286static ssize_t
 287nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 288{
 289	struct inode *inode = file_inode(iocb->ki_filp);
 290
 291	if (iov_iter_rw(iter) == WRITE)
 292		return 0;
 293
 294	/* Needs synchronization with the cleaner */
 295	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
 296}
 297
 298const struct address_space_operations nilfs_aops = {
 299	.writepage		= nilfs_writepage,
 300	.readpage		= nilfs_readpage,
 301	.writepages		= nilfs_writepages,
 302	.set_page_dirty		= nilfs_set_page_dirty,
 303	.readahead		= nilfs_readahead,
 304	.write_begin		= nilfs_write_begin,
 305	.write_end		= nilfs_write_end,
 306	/* .releasepage		= nilfs_releasepage, */
 307	.invalidatepage		= block_invalidatepage,
 308	.direct_IO		= nilfs_direct_IO,
 309	.is_partially_uptodate  = block_is_partially_uptodate,
 310};
 311
 312static int nilfs_insert_inode_locked(struct inode *inode,
 313				     struct nilfs_root *root,
 314				     unsigned long ino)
 315{
 316	struct nilfs_iget_args args = {
 317		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 318	};
 319
 320	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
 321}
 322
 323struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 324{
 325	struct super_block *sb = dir->i_sb;
 326	struct the_nilfs *nilfs = sb->s_fs_info;
 327	struct inode *inode;
 328	struct nilfs_inode_info *ii;
 329	struct nilfs_root *root;
 330	int err = -ENOMEM;
 331	ino_t ino;
 332
 333	inode = new_inode(sb);
 334	if (unlikely(!inode))
 335		goto failed;
 336
 337	mapping_set_gfp_mask(inode->i_mapping,
 338			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 339
 340	root = NILFS_I(dir)->i_root;
 341	ii = NILFS_I(inode);
 342	ii->i_state = BIT(NILFS_I_NEW);
 343	ii->i_root = root;
 344
 345	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 346	if (unlikely(err))
 347		goto failed_ifile_create_inode;
 348	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
 349
 350	atomic64_inc(&root->inodes_count);
 351	inode_init_owner(inode, dir, mode);
 352	inode->i_ino = ino;
 353	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 354
 355	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
 356		err = nilfs_bmap_read(ii->i_bmap, NULL);
 357		if (err < 0)
 358			goto failed_after_creation;
 359
 360		set_bit(NILFS_I_BMAP, &ii->i_state);
 361		/* No lock is needed; iget() ensures it. */
 362	}
 363
 364	ii->i_flags = nilfs_mask_flags(
 365		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
 366
 367	/* ii->i_file_acl = 0; */
 368	/* ii->i_dir_acl = 0; */
 369	ii->i_dir_start_lookup = 0;
 370	nilfs_set_inode_flags(inode);
 371	spin_lock(&nilfs->ns_next_gen_lock);
 372	inode->i_generation = nilfs->ns_next_generation++;
 373	spin_unlock(&nilfs->ns_next_gen_lock);
 374	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
 375		err = -EIO;
 376		goto failed_after_creation;
 377	}
 378
 379	err = nilfs_init_acl(inode, dir);
 380	if (unlikely(err))
 381		/*
  382		 * This should never occur.  When supporting nilfs_init_acl(),
  383		 * proper cancellation of the above jobs should be considered.
 384		 */
 385		goto failed_after_creation;
 386
 387	return inode;
 388
 389 failed_after_creation:
 390	clear_nlink(inode);
 391	if (inode->i_state & I_NEW)
 392		unlock_new_inode(inode);
 393	iput(inode);  /*
 394		       * raw_inode will be deleted through
 395		       * nilfs_evict_inode().
 396		       */
 397	goto failed;
 398
 399 failed_ifile_create_inode:
 400	make_bad_inode(inode);
 401	iput(inode);
 402 failed:
 403	return ERR_PTR(err);
 404}
 405
 406void nilfs_set_inode_flags(struct inode *inode)
 407{
 408	unsigned int flags = NILFS_I(inode)->i_flags;
 409	unsigned int new_fl = 0;
 410
 411	if (flags & FS_SYNC_FL)
 412		new_fl |= S_SYNC;
 413	if (flags & FS_APPEND_FL)
 414		new_fl |= S_APPEND;
 415	if (flags & FS_IMMUTABLE_FL)
 416		new_fl |= S_IMMUTABLE;
 417	if (flags & FS_NOATIME_FL)
 418		new_fl |= S_NOATIME;
 419	if (flags & FS_DIRSYNC_FL)
 420		new_fl |= S_DIRSYNC;
 421	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
 422			S_NOATIME | S_DIRSYNC);
 423}
 424
 425int nilfs_read_inode_common(struct inode *inode,
 426			    struct nilfs_inode *raw_inode)
 427{
 428	struct nilfs_inode_info *ii = NILFS_I(inode);
 429	int err;
 430
 431	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 432	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
 433	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
 434	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 435	inode->i_size = le64_to_cpu(raw_inode->i_size);
 436	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 437	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
 438	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
 439	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 440	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 441	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 442	if (inode->i_nlink == 0)
 443		return -ESTALE; /* this inode is deleted */
 444
 445	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
 446	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
 447#if 0
 448	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
 449	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
 450		0 : le32_to_cpu(raw_inode->i_dir_acl);
 451#endif
 452	ii->i_dir_start_lookup = 0;
 453	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 454
 455	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 456	    S_ISLNK(inode->i_mode)) {
 457		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
 458		if (err < 0)
 459			return err;
 460		set_bit(NILFS_I_BMAP, &ii->i_state);
 461		/* No lock is needed; iget() ensures it. */
 462	}
 463	return 0;
 464}
 465
 466static int __nilfs_read_inode(struct super_block *sb,
 467			      struct nilfs_root *root, unsigned long ino,
 468			      struct inode *inode)
 469{
 470	struct the_nilfs *nilfs = sb->s_fs_info;
 471	struct buffer_head *bh;
 472	struct nilfs_inode *raw_inode;
 473	int err;
 474
 475	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 476	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 477	if (unlikely(err))
 478		goto bad_inode;
 479
 480	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
 481
 482	err = nilfs_read_inode_common(inode, raw_inode);
 483	if (err)
 484		goto failed_unmap;
 485
 486	if (S_ISREG(inode->i_mode)) {
 487		inode->i_op = &nilfs_file_inode_operations;
 488		inode->i_fop = &nilfs_file_operations;
 489		inode->i_mapping->a_ops = &nilfs_aops;
 490	} else if (S_ISDIR(inode->i_mode)) {
 491		inode->i_op = &nilfs_dir_inode_operations;
 492		inode->i_fop = &nilfs_dir_operations;
 493		inode->i_mapping->a_ops = &nilfs_aops;
 494	} else if (S_ISLNK(inode->i_mode)) {
 495		inode->i_op = &nilfs_symlink_inode_operations;
 496		inode_nohighmem(inode);
 497		inode->i_mapping->a_ops = &nilfs_aops;
 498	} else {
 499		inode->i_op = &nilfs_special_inode_operations;
 500		init_special_inode(
 501			inode, inode->i_mode,
 502			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
 503	}
 504	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 505	brelse(bh);
 506	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 507	nilfs_set_inode_flags(inode);
 508	mapping_set_gfp_mask(inode->i_mapping,
 509			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 510	return 0;
 511
 512 failed_unmap:
 513	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 514	brelse(bh);
 515
 516 bad_inode:
 517	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 518	return err;
 519}
 520
 521static int nilfs_iget_test(struct inode *inode, void *opaque)
 522{
 523	struct nilfs_iget_args *args = opaque;
 524	struct nilfs_inode_info *ii;
 525
 526	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
 527		return 0;
 528
 529	ii = NILFS_I(inode);
 530	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 531		return !args->for_gc;
 532
 533	return args->for_gc && args->cno == ii->i_cno;
 534}
 535
 536static int nilfs_iget_set(struct inode *inode, void *opaque)
 537{
 538	struct nilfs_iget_args *args = opaque;
 539
 540	inode->i_ino = args->ino;
 541	if (args->for_gc) {
 542		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
 543		NILFS_I(inode)->i_cno = args->cno;
 544		NILFS_I(inode)->i_root = NULL;
 545	} else {
 546		if (args->root && args->ino == NILFS_ROOT_INO)
 547			nilfs_get_root(args->root);
 548		NILFS_I(inode)->i_root = args->root;
 549	}
 550	return 0;
 551}
 552
 553struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 554			    unsigned long ino)
 555{
 556	struct nilfs_iget_args args = {
 557		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 558	};
 559
 560	return ilookup5(sb, ino, nilfs_iget_test, &args);
 561}
 562
 563struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 564				unsigned long ino)
 565{
 566	struct nilfs_iget_args args = {
 567		.ino = ino, .root = root, .cno = 0, .for_gc = 0
 568	};
 569
 570	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 571}
 572
 573struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
 574			 unsigned long ino)
 575{
 576	struct inode *inode;
 577	int err;
 578
 579	inode = nilfs_iget_locked(sb, root, ino);
 580	if (unlikely(!inode))
 581		return ERR_PTR(-ENOMEM);
 582	if (!(inode->i_state & I_NEW))
 583		return inode;
 584
 585	err = __nilfs_read_inode(sb, root, ino, inode);
 586	if (unlikely(err)) {
 587		iget_failed(inode);
 588		return ERR_PTR(err);
 589	}
 590	unlock_new_inode(inode);
 591	return inode;
 592}
 593
 594struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 595				__u64 cno)
 596{
 597	struct nilfs_iget_args args = {
 598		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
 599	};
 600	struct inode *inode;
 601	int err;
 602
 603	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 604	if (unlikely(!inode))
 605		return ERR_PTR(-ENOMEM);
 606	if (!(inode->i_state & I_NEW))
 607		return inode;
 608
 609	err = nilfs_init_gcinode(inode);
 610	if (unlikely(err)) {
 611		iget_failed(inode);
 612		return ERR_PTR(err);
 613	}
 614	unlock_new_inode(inode);
 615	return inode;
 616}
 617
 618void nilfs_write_inode_common(struct inode *inode,
 619			      struct nilfs_inode *raw_inode, int has_bmap)
 620{
 621	struct nilfs_inode_info *ii = NILFS_I(inode);
 622
 623	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 624	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
 625	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
 626	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 627	raw_inode->i_size = cpu_to_le64(inode->i_size);
 628	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
 629	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
 630	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
 631	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
 632	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
 633
 634	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
 635	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 636
 637	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
 638		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 639
 640		/* zero-fill unused portion in the case of super root block */
 641		raw_inode->i_xattr = 0;
 642		raw_inode->i_pad = 0;
 643		memset((void *)raw_inode + sizeof(*raw_inode), 0,
 644		       nilfs->ns_inode_size - sizeof(*raw_inode));
 645	}
 646
 647	if (has_bmap)
 648		nilfs_bmap_write(ii->i_bmap, raw_inode);
 649	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 650		raw_inode->i_device_code =
 651			cpu_to_le64(huge_encode_dev(inode->i_rdev));
 652	/*
 653	 * When extending inode, nilfs->ns_inode_size should be checked
 654	 * for substitutions of appended fields.
 655	 */
 656}
 657
 658void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
 659{
 660	ino_t ino = inode->i_ino;
 661	struct nilfs_inode_info *ii = NILFS_I(inode);
 662	struct inode *ifile = ii->i_root->ifile;
 663	struct nilfs_inode *raw_inode;
 664
 665	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
 666
 667	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 668		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
 669	if (flags & I_DIRTY_DATASYNC)
 670		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
 671
 672	nilfs_write_inode_common(inode, raw_inode, 0);
 673		/*
 674		 * XXX: call with has_bmap = 0 is a workaround to avoid
 675		 * deadlock of bmap.  This delays update of i_bmap to just
 676		 * before writing.
 677		 */
 678
 679	nilfs_ifile_unmap_inode(ifile, ino, ibh);
 680}
 681
 682#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
 683
 684static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
 685				unsigned long from)
 686{
 687	__u64 b;
 688	int ret;
 689
 690	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 691		return;
 692repeat:
 693	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 694	if (ret == -ENOENT)
 695		return;
 696	else if (ret < 0)
 697		goto failed;
 698
 699	if (b < from)
 700		return;
 701
 702	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
 703	ret = nilfs_bmap_truncate(ii->i_bmap, b);
 704	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
 705	if (!ret || (ret == -ENOMEM &&
 706		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 707		goto repeat;
 708
 709failed:
 710	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
 711		   ret, ii->vfs_inode.i_ino);
 712}
 713
 714void nilfs_truncate(struct inode *inode)
 715{
 716	unsigned long blkoff;
 717	unsigned int blocksize;
 718	struct nilfs_transaction_info ti;
 719	struct super_block *sb = inode->i_sb;
 720	struct nilfs_inode_info *ii = NILFS_I(inode);
 721
 722	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 723		return;
 724	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 725		return;
 726
 727	blocksize = sb->s_blocksize;
 728	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
 729	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 730
 731	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 732
 733	nilfs_truncate_bmap(ii, blkoff);
 734
 735	inode->i_mtime = inode->i_ctime = current_time(inode);
 736	if (IS_SYNC(inode))
 737		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 738
 739	nilfs_mark_inode_dirty(inode);
 740	nilfs_set_file_dirty(inode, 0);
 741	nilfs_transaction_commit(sb);
 742	/*
 743	 * May construct a logical segment and may fail in sync mode.
 744	 * But truncate has no return value.
 745	 */
 746}
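/*
 * Worked example (illustrative): with blocksize = 4096 (blkbits = 12)
 * and i_size = 5000, blkoff = (5000 + 4095) >> 12 = 2, so blocks 0-1
 * survive, nilfs_truncate_bmap() drops keys from block 2 upward, and
 * block_truncate_page() zeroes bytes 5000..8191 in block 1.
 */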
 747
 748static void nilfs_clear_inode(struct inode *inode)
 749{
 750	struct nilfs_inode_info *ii = NILFS_I(inode);
 751
 752	/*
 753	 * Free resources allocated in nilfs_read_inode(), here.
 754	 */
 755	BUG_ON(!list_empty(&ii->i_dirty));
 756	brelse(ii->i_bh);
 757	ii->i_bh = NULL;
 758
 759	if (nilfs_is_metadata_file_inode(inode))
 760		nilfs_mdt_clear(inode);
 761
 762	if (test_bit(NILFS_I_BMAP, &ii->i_state))
 763		nilfs_bmap_clear(ii->i_bmap);
 764
 765	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 766
 767	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
 768		nilfs_put_root(ii->i_root);
 769}
 770
 771void nilfs_evict_inode(struct inode *inode)
 772{
 773	struct nilfs_transaction_info ti;
 774	struct super_block *sb = inode->i_sb;
 775	struct nilfs_inode_info *ii = NILFS_I(inode);
 776	int ret;
 777
 778	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
 779		truncate_inode_pages_final(&inode->i_data);
 780		clear_inode(inode);
 781		nilfs_clear_inode(inode);
 782		return;
 783	}
 784	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 785
 786	truncate_inode_pages_final(&inode->i_data);
 787
 788	/* TODO: some of the following operations may fail.  */
 789	nilfs_truncate_bmap(ii, 0);
 790	nilfs_mark_inode_dirty(inode);
 791	clear_inode(inode);
 792
 793	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
 794	if (!ret)
 795		atomic64_dec(&ii->i_root->inodes_count);
 796
 797	nilfs_clear_inode(inode);
 798
 799	if (IS_SYNC(inode))
 800		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 801	nilfs_transaction_commit(sb);
 802	/*
 803	 * May construct a logical segment and may fail in sync mode.
 804	 * But delete_inode has no return value.
 805	 */
 806}
 807
 808int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
 809{
 810	struct nilfs_transaction_info ti;
 811	struct inode *inode = d_inode(dentry);
 812	struct super_block *sb = inode->i_sb;
 813	int err;
 814
 815	err = setattr_prepare(dentry, iattr);
 816	if (err)
 817		return err;
 818
 819	err = nilfs_transaction_begin(sb, &ti, 0);
 820	if (unlikely(err))
 821		return err;
 822
 823	if ((iattr->ia_valid & ATTR_SIZE) &&
 824	    iattr->ia_size != i_size_read(inode)) {
 825		inode_dio_wait(inode);
 826		truncate_setsize(inode, iattr->ia_size);
 827		nilfs_truncate(inode);
 828	}
 829
 830	setattr_copy(inode, iattr);
 831	mark_inode_dirty(inode);
 832
 833	if (iattr->ia_valid & ATTR_MODE) {
 834		err = nilfs_acl_chmod(inode);
 835		if (unlikely(err))
 836			goto out_err;
 837	}
 838
 839	return nilfs_transaction_commit(sb);
 840
 841out_err:
 842	nilfs_transaction_abort(sb);
 843	return err;
 844}
 845
 846int nilfs_permission(struct inode *inode, int mask)
 847{
 848	struct nilfs_root *root = NILFS_I(inode)->i_root;
 849
 850	if ((mask & MAY_WRITE) && root &&
 851	    root->cno != NILFS_CPTREE_CURRENT_CNO)
 852		return -EROFS; /* snapshot is not writable */
 853
 854	return generic_permission(inode, mask);
 855}
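/*
 * Illustrative note (not in the original file): only the current
 * (writable) tree carries root->cno == NILFS_CPTREE_CURRENT_CNO; a
 * mounted snapshot records the fixed checkpoint number it was taken
 * from, so every MAY_WRITE access on a snapshot is rejected with
 * -EROFS before generic_permission() is consulted.
 */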
 856
 857int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 858{
 859	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 860	struct nilfs_inode_info *ii = NILFS_I(inode);
 861	int err;
 862
 863	spin_lock(&nilfs->ns_inode_lock);
 864	if (ii->i_bh == NULL) {
 865		spin_unlock(&nilfs->ns_inode_lock);
 866		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
 867						  inode->i_ino, pbh);
 868		if (unlikely(err))
 869			return err;
 870		spin_lock(&nilfs->ns_inode_lock);
 871		if (ii->i_bh == NULL)
 872			ii->i_bh = *pbh;
 873		else {
 874			brelse(*pbh);
 875			*pbh = ii->i_bh;
 876		}
 877	} else
 878		*pbh = ii->i_bh;
 879
 880	get_bh(*pbh);
 881	spin_unlock(&nilfs->ns_inode_lock);
 882	return 0;
 883}
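/*
 * Illustrative sketch, not part of the original file: the function
 * above is an instance of double-checked locking.  Its generic shape,
 * with purely hypothetical names, is:
 */
#if 0
	spin_lock(&lock);
	if (!cached_obj) {
		spin_unlock(&lock);
		obj = slow_lookup();		/* may sleep or do I/O */
		spin_lock(&lock);
		if (!cached_obj)
			cached_obj = obj;	/* we won the race */
		else
			release(obj);		/* lost the race; reuse winner's */
	}
	get_ref(cached_obj);			/* take a reference under the lock */
	spin_unlock(&lock);
#endif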
 884
 885int nilfs_inode_dirty(struct inode *inode)
 886{
 887	struct nilfs_inode_info *ii = NILFS_I(inode);
 888	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 889	int ret = 0;
 890
 891	if (!list_empty(&ii->i_dirty)) {
 892		spin_lock(&nilfs->ns_inode_lock);
 893		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
 894			test_bit(NILFS_I_BUSY, &ii->i_state);
 895		spin_unlock(&nilfs->ns_inode_lock);
 896	}
 897	return ret;
 898}
 899
 900int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
 901{
 902	struct nilfs_inode_info *ii = NILFS_I(inode);
 903	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 904
 905	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
 906
 907	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
 908		return 0;
 909
 910	spin_lock(&nilfs->ns_inode_lock);
 911	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
 912	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
 913		/*
 914		 * Because this routine may race with nilfs_dispose_list(),
 915		 * we have to check NILFS_I_QUEUED here, too.
 916		 */
 917		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
 918			/*
 919			 * This will happen when somebody is freeing
 920			 * this inode.
 921			 */
 922			nilfs_warn(inode->i_sb,
 923				   "cannot set file dirty (ino=%lu): the file is being freed",
 924				   inode->i_ino);
 925			spin_unlock(&nilfs->ns_inode_lock);
 926			return -EINVAL; /*
 927					 * NILFS_I_DIRTY may remain for
 928					 * freeing inode.
 929					 */
 930		}
 931		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
 932		set_bit(NILFS_I_QUEUED, &ii->i_state);
 933	}
 934	spin_unlock(&nilfs->ns_inode_lock);
 935	return 0;
 936}
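/*
 * Illustrative note (an observation, not original text): the
 * test_and_set_bit(NILFS_I_DIRTY, ...) above is a lock-free fast path;
 * list queueing under ns_inode_lock is only needed on the 0 -> 1
 * transition.  nilfs_truncate() calls nilfs_set_file_dirty(inode, 0)
 * purely to queue the inode, while write paths pass the count of newly
 * dirtied blocks so that ns_ndirtyblks tracks pending data.
 */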
 937
 938int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
 939{
 940	struct buffer_head *ibh;
 941	int err;
 942
 943	err = nilfs_load_inode_block(inode, &ibh);
 944	if (unlikely(err)) {
 945		nilfs_warn(inode->i_sb,
 946			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
 947			   inode->i_ino, err);
 948		return err;
 949	}
 950	nilfs_update_inode(inode, ibh, flags);
 951	mark_buffer_dirty(ibh);
 952	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
 953	brelse(ibh);
 954	return 0;
 955}
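/*
 * Usage sketch (illustrative only): nilfs_dirty_inode() below wraps
 * this helper in its own transaction; callers already running inside
 * one can invoke it directly:
 */
#if 0
	/* between nilfs_transaction_begin() and nilfs_transaction_commit() */
	err = __nilfs_mark_inode_dirty(inode, I_DIRTY);
	if (unlikely(err))
		goto failed;	/* the on-disk inode block could not be read */
#endif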
 956
 957/**
 958 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 959 * @inode: inode of the file to be registered.
 960 *
  961 * nilfs_dirty_inode() loads an inode block containing the specified
  962 * @inode and copies the in-core inode state into the corresponding
  963 * nilfs_inode entry in that block. This operation is excluded from the segment
 964 * construction. This function can be called both as a single operation
 965 * and as a part of indivisible file operations.
 966 */
 967void nilfs_dirty_inode(struct inode *inode, int flags)
 968{
 969	struct nilfs_transaction_info ti;
 970	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 971
 972	if (is_bad_inode(inode)) {
 973		nilfs_warn(inode->i_sb,
 974			   "tried to mark bad_inode dirty. ignored.");
 975		dump_stack();
 976		return;
 977	}
 978	if (mdi) {
 979		nilfs_mdt_mark_dirty(inode);
 980		return;
 981	}
 982	nilfs_transaction_begin(inode->i_sb, &ti, 0);
 983	__nilfs_mark_inode_dirty(inode, flags);
 984	nilfs_transaction_commit(inode->i_sb); /* never fails */
 985}
 986
 987int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 988		 __u64 start, __u64 len)
 989{
 990	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 991	__u64 logical = 0, phys = 0, size = 0;
 992	__u32 flags = 0;
 993	loff_t isize;
 994	sector_t blkoff, end_blkoff;
 995	sector_t delalloc_blkoff;
 996	unsigned long delalloc_blklen;
 997	unsigned int blkbits = inode->i_blkbits;
 998	int ret, n;
 999
1000	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
1001	if (ret)
1002		return ret;
1003
1004	inode_lock(inode);
1005
1006	isize = i_size_read(inode);
1007
1008	blkoff = start >> blkbits;
1009	end_blkoff = (start + len - 1) >> blkbits;
1010
1011	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1012							&delalloc_blkoff);
1013
1014	do {
1015		__u64 blkphy;
1016		unsigned int maxblocks;
1017
1018		if (delalloc_blklen && blkoff == delalloc_blkoff) {
1019			if (size) {
1020				/* End of the current extent */
1021				ret = fiemap_fill_next_extent(
1022					fieinfo, logical, phys, size, flags);
1023				if (ret)
1024					break;
1025			}
1026			if (blkoff > end_blkoff)
1027				break;
1028
1029			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1030			logical = blkoff << blkbits;
1031			phys = 0;
1032			size = delalloc_blklen << blkbits;
1033
1034			blkoff = delalloc_blkoff + delalloc_blklen;
1035			delalloc_blklen = nilfs_find_uncommitted_extent(
1036				inode, blkoff, &delalloc_blkoff);
1037			continue;
1038		}
1039
1040		/*
1041		 * Limit the number of blocks that we look up so as
1042		 * not to get into the next delayed allocation extent.
1043		 */
1044		maxblocks = INT_MAX;
1045		if (delalloc_blklen)
1046			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1047					  maxblocks);
1048		blkphy = 0;
1049
1050		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1051		n = nilfs_bmap_lookup_contig(
1052			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1053		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1054
1055		if (n < 0) {
1056			int past_eof;
1057
1058			if (unlikely(n != -ENOENT))
1059				break; /* error */
1060
1061			/* HOLE */
1062			blkoff++;
1063			past_eof = ((blkoff << blkbits) >= isize);
1064
1065			if (size) {
1066				/* End of the current extent */
1067
1068				if (past_eof)
1069					flags |= FIEMAP_EXTENT_LAST;
1070
1071				ret = fiemap_fill_next_extent(
1072					fieinfo, logical, phys, size, flags);
1073				if (ret)
1074					break;
1075				size = 0;
1076			}
1077			if (blkoff > end_blkoff || past_eof)
1078				break;
1079		} else {
1080			if (size) {
1081				if (phys && blkphy << blkbits == phys + size) {
1082					/* The current extent goes on */
1083					size += n << blkbits;
1084				} else {
1085					/* Terminate the current extent */
1086					ret = fiemap_fill_next_extent(
1087						fieinfo, logical, phys, size,
1088						flags);
1089					if (ret || blkoff > end_blkoff)
1090						break;
1091
1092					/* Start another extent */
1093					flags = FIEMAP_EXTENT_MERGED;
1094					logical = blkoff << blkbits;
1095					phys = blkphy << blkbits;
1096					size = n << blkbits;
1097				}
1098			} else {
1099				/* Start a new extent */
1100				flags = FIEMAP_EXTENT_MERGED;
1101				logical = blkoff << blkbits;
1102				phys = blkphy << blkbits;
1103				size = n << blkbits;
1104			}
1105			blkoff += n;
1106		}
1107		cond_resched();
1108	} while (true);
1109
1110	/* If ret is 1 then we just hit the end of the extent array */
1111	if (ret == 1)
1112		ret = 0;
1113
1114	inode_unlock(inode);
1115	return ret;
1116}
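/*
 * Worked example (illustrative): with blkbits = 12, a first lookup
 * returning n = 4 blocks at blkphy = 100 for blkoff = 0 yields
 * logical = 0, phys = 100 << 12 = 409600, size = 4 << 12 = 16384.  If
 * the next lookup returns blkphy = 104 for blkoff = 4, then
 * blkphy << blkbits == phys + size (425984), so the run extends the
 * current extent instead of starting a new one via
 * fiemap_fill_next_extent().
 */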