   1/*
   2 *  linux/fs/ext3/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  Goal-directed block allocation by Stephen Tweedie
  16 *	(sct@redhat.com), 1993, 1998
  17 *  Big-endian to little-endian byte-swapping/bitmaps by
  18 *        David S. Miller (davem@caip.rutgers.edu), 1995
  19 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  20 *	(jj@sunsite.ms.mff.cuni.cz)
  21 *
  22 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  23 */
   24
   25#include <linux/highuid.h>
   26#include <linux/quotaops.h>
   27#include <linux/writeback.h>
   28#include <linux/mpage.h>
   29#include <linux/namei.h>
  30#include "ext3.h"
  31#include "xattr.h"
  32#include "acl.h"
  33
  34static int ext3_writepage_trans_blocks(struct inode *inode);
  35static int ext3_block_truncate_page(struct inode *inode, loff_t from);
  36
  37/*
  38 * Test whether an inode is a fast symlink.
  39 */
  40static int ext3_inode_is_fast_symlink(struct inode *inode)
  41{
  42	int ea_blocks = EXT3_I(inode)->i_file_acl ?
  43		(inode->i_sb->s_blocksize >> 9) : 0;
  44
  45	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
  46}
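/*
 * A fast symlink stores its target string directly in the inode's i_data
 * array rather than in a data block, so i_blocks should be zero except
 * for the sectors charged for an external xattr block (i_file_acl),
 * which is what the ea_blocks term above subtracts out.
 */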
  47
  48/*
  49 * The ext3 forget function must perform a revoke if we are freeing data
  50 * which has been journaled.  Metadata (eg. indirect blocks) must be
  51 * revoked in all cases.
  52 *
  53 * "bh" may be NULL: a metadata block may have been freed from memory
  54 * but there may still be a record of it in the journal, and that record
  55 * still needs to be revoked.
  56 */
  57int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  58			struct buffer_head *bh, ext3_fsblk_t blocknr)
  59{
  60	int err;
  61
  62	might_sleep();
  63
  64	trace_ext3_forget(inode, is_metadata, blocknr);
  65	BUFFER_TRACE(bh, "enter");
  66
  67	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
  68		  "data mode %lx\n",
  69		  bh, is_metadata, inode->i_mode,
  70		  test_opt(inode->i_sb, DATA_FLAGS));
  71
  72	/* Never use the revoke function if we are doing full data
  73	 * journaling: there is no need to, and a V1 superblock won't
  74	 * support it.  Otherwise, only skip the revoke on un-journaled
  75	 * data blocks. */
  76
  77	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
  78	    (!is_metadata && !ext3_should_journal_data(inode))) {
  79		if (bh) {
  80			BUFFER_TRACE(bh, "call journal_forget");
  81			return ext3_journal_forget(handle, bh);
  82		}
  83		return 0;
  84	}
  85
  86	/*
  87	 * data!=journal && (is_metadata || should_journal_data(inode))
  88	 */
  89	BUFFER_TRACE(bh, "call ext3_journal_revoke");
  90	err = ext3_journal_revoke(handle, blocknr, bh);
  91	if (err)
  92		ext3_abort(inode->i_sb, __func__,
  93			   "error %d when attempting revoke", err);
  94	BUFFER_TRACE(bh, "exit");
  95	return err;
  96}
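/*
 * In short, ext3_forget() picks between two journal operations:
 *
 *   data=journal mount, or an un-journaled data block  ->  journal_forget()
 *   metadata, or journaled data on other mounts        ->  journal_revoke()
 *
 * journal_forget() only drops the buffer from the running transaction;
 * journal_revoke() additionally records the block number so that journal
 * replay will never resurrect old contents on top of the freed block.
 */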
  97
  98/*
  99 * Work out how many blocks we need to proceed with the next chunk of a
 100 * truncate transaction.
 101 */
 102static unsigned long blocks_for_truncate(struct inode *inode)
 103{
 104	unsigned long needed;
 105
 106	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
 107
 108	/* Give ourselves just enough room to cope with inodes in which
 109	 * i_blocks is corrupt: we've seen disk corruptions in the past
 110	 * which resulted in random data in an inode which looked enough
 111	 * like a regular file for ext3 to try to delete it.  Things
 112	 * will go a bit crazy if that happens, but at least we should
 113	 * try not to panic the whole kernel. */
 114	if (needed < 2)
 115		needed = 2;
 116
 117	/* But we need to bound the transaction so we don't overflow the
 118	 * journal. */
 119	if (needed > EXT3_MAX_TRANS_DATA)
 120		needed = EXT3_MAX_TRANS_DATA;
 121
 122	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 123}
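/*
 * Rough example, assuming a 4KB block size (s_blocksize_bits == 12):
 * i_blocks counts 512-byte sectors, so a fully-mapped 1MB file has
 * i_blocks of roughly 2048 and needed of about 2048 >> 3 == 256, which
 * is then clamped to at most EXT3_MAX_TRANS_DATA before the
 * EXT3_DATA_TRANS_BLOCKS base is added.
 */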
 124
 125/*
 126 * Truncate transactions can be complex and absolutely huge.  So we need to
  127 * be able to restart the transaction at a convenient checkpoint to make
 128 * sure we don't overflow the journal.
 129 *
 130 * start_transaction gets us a new handle for a truncate transaction,
 131 * and extend_transaction tries to extend the existing one a bit.  If
 132 * extend fails, we need to propagate the failure up and restart the
 133 * transaction in the top-level truncate loop. --sct
 134 */
 135static handle_t *start_transaction(struct inode *inode)
 136{
 137	handle_t *result;
 138
 139	result = ext3_journal_start(inode, blocks_for_truncate(inode));
 140	if (!IS_ERR(result))
 141		return result;
 142
 143	ext3_std_error(inode->i_sb, PTR_ERR(result));
 144	return result;
 145}
 146
 147/*
 148 * Try to extend this transaction for the purposes of truncation.
 149 *
 150 * Returns 0 if we managed to create more room.  If we can't create more
  151 * room and the transaction must be restarted, we return 1.
 152 */
 153static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 154{
 155	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
 156		return 0;
 157	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
 158		return 0;
 159	return 1;
 160}
 161
 162/*
 163 * Restart the transaction associated with *handle.  This does a commit,
 164 * so before we call here everything must be consistently dirtied against
 165 * this transaction.
 166 */
 167static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
 168{
 169	int ret;
 170
 171	jbd_debug(2, "restarting handle %p\n", handle);
 172	/*
  173	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle.
  174	 * At this moment, get_block can be called only for blocks inside
  175	 * i_size since the page cache has already been dropped and writes are
 176	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
 177	 */
 178	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
 179	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
 180	mutex_lock(&EXT3_I(inode)->truncate_mutex);
 181	return ret;
 182}
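/*
 * The truncate code uses the two helpers above roughly like this
 * (a simplified sketch of the pattern, not a verbatim copy of the caller):
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		ext3_mark_inode_dirty(handle, inode);
 *		truncate_restart_transaction(handle, inode);
 *	}
 *
 * i.e. everything is consistently dirtied first, then the handle is
 * restarted with fresh credits from blocks_for_truncate().
 */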
 183
 184/*
 185 * Called at inode eviction from icache
 186 */
 187void ext3_evict_inode (struct inode *inode)
 188{
 189	struct ext3_inode_info *ei = EXT3_I(inode);
 190	struct ext3_block_alloc_info *rsv;
 191	handle_t *handle;
 192	int want_delete = 0;
 193
 194	trace_ext3_evict_inode(inode);
 195	if (!inode->i_nlink && !is_bad_inode(inode)) {
 196		dquot_initialize(inode);
 197		want_delete = 1;
 198	}
 199
 200	/*
  201	 * When journalling data, dirty buffers are tracked only in the journal.
  202	 * So although mm thinks everything is clean and ready for reaping, the
  203	 * inode might still have some pages to write in the running
  204	 * transaction or waiting to be checkpointed. Thus calling
  205	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
  206	 * these buffers can cause data loss. Also, even if we did not discard
  207	 * these buffers, we would have no way to find them after the inode
  208	 * is reaped and thus the user could see stale data if they try to read
  209	 * them before the transaction is checkpointed. So be careful and
 210	 * force everything to disk here... We use ei->i_datasync_tid to
 211	 * store the newest transaction containing inode's data.
 212	 *
 213	 * Note that directories do not have this problem because they don't
 214	 * use page cache.
 215	 *
 216	 * The s_journal check handles the case when ext3_get_journal() fails
 217	 * and puts the journal inode.
 218	 */
 219	if (inode->i_nlink && ext3_should_journal_data(inode) &&
 220	    EXT3_SB(inode->i_sb)->s_journal &&
 221	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
 222		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
 223		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
 224
 225		log_start_commit(journal, commit_tid);
 226		log_wait_commit(journal, commit_tid);
 227		filemap_write_and_wait(&inode->i_data);
 228	}
 229	truncate_inode_pages(&inode->i_data, 0);
 230
 231	ext3_discard_reservation(inode);
 232	rsv = ei->i_block_alloc_info;
 233	ei->i_block_alloc_info = NULL;
 234	if (unlikely(rsv))
 235		kfree(rsv);
 236
 237	if (!want_delete)
 238		goto no_delete;
 239
 240	handle = start_transaction(inode);
 241	if (IS_ERR(handle)) {
 242		/*
 243		 * If we're going to skip the normal cleanup, we still need to
 244		 * make sure that the in-core orphan linked list is properly
 245		 * cleaned up.
 246		 */
 247		ext3_orphan_del(NULL, inode);
 248		goto no_delete;
 249	}
 250
 251	if (IS_SYNC(inode))
 252		handle->h_sync = 1;
 253	inode->i_size = 0;
 254	if (inode->i_blocks)
 255		ext3_truncate(inode);
 256	/*
 257	 * Kill off the orphan record created when the inode lost the last
 258	 * link.  Note that ext3_orphan_del() has to be able to cope with the
 259	 * deletion of a non-existent orphan - ext3_truncate() could
 260	 * have removed the record.
 261	 */
 262	ext3_orphan_del(handle, inode);
 263	ei->i_dtime = get_seconds();
 264
 265	/*
 266	 * One subtle ordering requirement: if anything has gone wrong
 267	 * (transaction abort, IO errors, whatever), then we can still
 268	 * do these next steps (the fs will already have been marked as
 269	 * having errors), but we can't free the inode if the mark_dirty
 270	 * fails.
 271	 */
 272	if (ext3_mark_inode_dirty(handle, inode)) {
 273		/* If that failed, just dquot_drop() and be done with that */
 274		dquot_drop(inode);
 275		clear_inode(inode);
 276	} else {
 277		ext3_xattr_delete_inode(handle, inode);
 278		dquot_free_inode(inode);
 279		dquot_drop(inode);
 280		clear_inode(inode);
 281		ext3_free_inode(handle, inode);
 282	}
 283	ext3_journal_stop(handle);
 284	return;
 285no_delete:
 286	clear_inode(inode);
 287	dquot_drop(inode);
 288}
 289
 290typedef struct {
 291	__le32	*p;
 292	__le32	key;
 293	struct buffer_head *bh;
 294} Indirect;
 295
 296static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 297{
 298	p->key = *(p->p = v);
 299	p->bh = bh;
 300}
 301
 302static int verify_chain(Indirect *from, Indirect *to)
 303{
 304	while (from <= to && from->key == *from->p)
 305		from++;
 306	return (from > to);
 307}
 308
 309/**
 310 *	ext3_block_to_path - parse the block number into array of offsets
 311 *	@inode: inode in question (we are only interested in its superblock)
 312 *	@i_block: block number to be parsed
 313 *	@offsets: array to store the offsets in
 314 *      @boundary: set this non-zero if the referred-to block is likely to be
 315 *             followed (on disk) by an indirect block.
 316 *
  317 *	To store the locations of a file's data, ext3 uses a data structure
  318 *	common to UNIX filesystems - a tree of pointers anchored in the inode,
  319 *	with data blocks at the leaves and indirect blocks in intermediate nodes.
  320 *	This function translates the block number into a path in that tree -
  321 *	the return value is the path length and @offsets[n] is the offset of the
  322 *	pointer to the (n+1)th node in the nth one. If @block is out of range
  323 *	(negative or too large) a warning is printed and zero is returned.
 324 *
 325 *	Note: function doesn't find node addresses, so no IO is needed. All
 326 *	we need to know is the capacity of indirect blocks (taken from the
 327 *	inode->i_sb).
 328 */
 329
 330/*
 331 * Portability note: the last comparison (check that we fit into triple
 332 * indirect block) is spelled differently, because otherwise on an
 333 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 334 * if our filesystem had 8Kb blocks. We might use long long, but that would
 335 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 336 * i_block would have to be negative in the very beginning, so we would not
 337 * get there at all.
 338 */
 339
 340static int ext3_block_to_path(struct inode *inode,
 341			long i_block, int offsets[4], int *boundary)
 342{
 343	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
 344	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
 345	const long direct_blocks = EXT3_NDIR_BLOCKS,
 346		indirect_blocks = ptrs,
 347		double_blocks = (1 << (ptrs_bits * 2));
 348	int n = 0;
 349	int final = 0;
 350
 351	if (i_block < 0) {
 352		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
 353	} else if (i_block < direct_blocks) {
 354		offsets[n++] = i_block;
 355		final = direct_blocks;
 356	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
 357		offsets[n++] = EXT3_IND_BLOCK;
 358		offsets[n++] = i_block;
 359		final = ptrs;
 360	} else if ((i_block -= indirect_blocks) < double_blocks) {
 361		offsets[n++] = EXT3_DIND_BLOCK;
 362		offsets[n++] = i_block >> ptrs_bits;
 363		offsets[n++] = i_block & (ptrs - 1);
 364		final = ptrs;
 365	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
 366		offsets[n++] = EXT3_TIND_BLOCK;
 367		offsets[n++] = i_block >> (ptrs_bits * 2);
 368		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
 369		offsets[n++] = i_block & (ptrs - 1);
 370		final = ptrs;
 371	} else {
 372		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
 373	}
 374	if (boundary)
 375		*boundary = final - 1 - (i_block & (ptrs - 1));
 376	return n;
 377}
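/*
 * Worked example, assuming 1KB blocks (256 pointers per indirect block)
 * and EXT3_NDIR_BLOCKS of 12:
 *
 *	i_block 5   -> offsets = { 5 },        depth 1 (direct)
 *	i_block 12  -> offsets = { 12, 0 },    depth 2 (indirect)
 *	i_block 268 -> offsets = { 13, 0, 0 }, depth 3 (double indirect)
 *
 * where 12 and 13 are EXT3_IND_BLOCK and EXT3_DIND_BLOCK respectively.
 */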
 378
 379/**
 380 *	ext3_get_branch - read the chain of indirect blocks leading to data
 381 *	@inode: inode in question
 382 *	@depth: depth of the chain (1 - direct pointer, etc.)
 383 *	@offsets: offsets of pointers in inode/indirect blocks
 384 *	@chain: place to store the result
 385 *	@err: here we store the error value
 386 *
 387 *	Function fills the array of triples <key, p, bh> and returns %NULL
 388 *	if everything went OK or the pointer to the last filled triple
 389 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 390 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 391 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 392 *	number (it points into struct inode for i==0 and into the bh->b_data
 393 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 394 *	block for i>0 and NULL for i==0. In other words, it holds the block
 395 *	numbers of the chain, addresses they were taken from (and where we can
 396 *	verify that chain did not change) and buffer_heads hosting these
 397 *	numbers.
 398 *
 399 *	Function stops when it stumbles upon zero pointer (absent block)
 400 *		(pointer to last triple returned, *@err == 0)
 401 *	or when it gets an IO error reading an indirect block
 402 *		(ditto, *@err == -EIO)
 403 *	or when it notices that chain had been changed while it was reading
 404 *		(ditto, *@err == -EAGAIN)
 405 *	or when it reads all @depth-1 indirect blocks successfully and finds
  406 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 407 */
 408static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
 409				 Indirect chain[4], int *err)
 410{
 411	struct super_block *sb = inode->i_sb;
 412	Indirect *p = chain;
 413	struct buffer_head *bh;
 414
 415	*err = 0;
 416	/* i_data is not going away, no lock needed */
 417	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
 418	if (!p->key)
 419		goto no_block;
 420	while (--depth) {
 421		bh = sb_bread(sb, le32_to_cpu(p->key));
 422		if (!bh)
 423			goto failure;
 424		/* Reader: pointers */
 425		if (!verify_chain(chain, p))
 426			goto changed;
 427		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
 428		/* Reader: end */
 429		if (!p->key)
 430			goto no_block;
 431	}
 432	return NULL;
 433
 434changed:
 435	brelse(bh);
 436	*err = -EAGAIN;
 437	goto no_block;
 438failure:
 439	*err = -EIO;
 440no_block:
 441	return p;
 442}
 443
 444/**
 445 *	ext3_find_near - find a place for allocation with sufficient locality
 446 *	@inode: owner
 447 *	@ind: descriptor of indirect block.
 448 *
 449 *	This function returns the preferred place for block allocation.
 450 *	It is used when heuristic for sequential allocation fails.
 451 *	Rules are:
 452 *	  + if there is a block to the left of our position - allocate near it.
 453 *	  + if pointer will live in indirect block - allocate near that block.
 454 *	  + if pointer will live in inode - allocate in the same
 455 *	    cylinder group.
 456 *
  457 * In the latter case we colour the starting block by the caller's PID to
 458 * prevent it from clashing with concurrent allocations for a different inode
 459 * in the same block group.   The PID is used here so that functionally related
  460 * files will end up close together on disk.
 461 *
 462 *	Caller must make sure that @ind is valid and will stay that way.
 463 */
 464static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
 465{
 466	struct ext3_inode_info *ei = EXT3_I(inode);
 467	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
 468	__le32 *p;
 469	ext3_fsblk_t bg_start;
 470	ext3_grpblk_t colour;
 471
 472	/* Try to find previous block */
 473	for (p = ind->p - 1; p >= start; p--) {
 474		if (*p)
 475			return le32_to_cpu(*p);
 476	}
 477
 478	/* No such thing, so let's try location of indirect block */
 479	if (ind->bh)
 480		return ind->bh->b_blocknr;
 481
 482	/*
 483	 * It is going to be referred to from the inode itself? OK, just put it
 484	 * into the same cylinder group then.
 485	 */
 486	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
 487	colour = (current->pid % 16) *
 488			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
 489	return bg_start + colour;
 490}
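/*
 * For instance, with 32768 blocks per group a task whose PID is 4242
 * (4242 % 16 == 2) gets colour == 2 * (32768 / 16) == 4096, so its
 * fallback goal is 4096 blocks past the start of the inode's block group.
 */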
 491
 492/**
 493 *	ext3_find_goal - find a preferred place for allocation.
 494 *	@inode: owner
 495 *	@block:  block we want
 496 *	@partial: pointer to the last triple within a chain
 497 *
  498 *	Normally this function finds the preferred place for block allocation
  499 *	and returns it.
 500 */
 501
 502static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 503				   Indirect *partial)
 504{
 505	struct ext3_block_alloc_info *block_i;
 506
 507	block_i =  EXT3_I(inode)->i_block_alloc_info;
 508
 509	/*
 510	 * try the heuristic for sequential allocation,
 511	 * failing that at least try to get decent locality.
 512	 */
 513	if (block_i && (block == block_i->last_alloc_logical_block + 1)
 514		&& (block_i->last_alloc_physical_block != 0)) {
 515		return block_i->last_alloc_physical_block + 1;
 516	}
 517
 518	return ext3_find_near(inode, partial);
 519}
 520
 521/**
 522 *	ext3_blks_to_allocate - Look up the block map and count the number
  523 *	of direct blocks that need to be allocated for the given branch.
 524 *
 525 *	@branch: chain of indirect blocks
  526 *	@k: number of blocks needed for indirect blocks
 527 *	@blks: number of data blocks to be mapped.
 528 *	@blocks_to_boundary:  the offset in the indirect block
 529 *
  530 *	return the total number of blocks to be allocated, including the
 531 *	direct and indirect blocks.
 532 */
 533static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 534		int blocks_to_boundary)
 535{
 536	unsigned long count = 0;
 537
 538	/*
  539	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
  540	 * so it's clear the blocks on that path have not been allocated either.
 541	 */
 542	if (k > 0) {
 543		/* right now we don't handle cross boundary allocation */
 544		if (blks < blocks_to_boundary + 1)
 545			count += blks;
 546		else
 547			count += blocks_to_boundary + 1;
 548		return count;
 549	}
 550
 551	count++;
 552	while (count < blks && count <= blocks_to_boundary &&
 553		le32_to_cpu(*(branch[0].p + count)) == 0) {
 554		count++;
 555	}
 556	return count;
 557}
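/*
 * Example with made-up numbers: with k == 0, blks == 8 and
 * blocks_to_boundary == 3, and the three slots following branch[0].p
 * still zero, the loop above counts 4 blocks (up to and including the
 * boundary block) and leaves the rest for a later call.
 */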
 558
 559/**
 560 *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
 561 *	@handle: handle for this transaction
 562 *	@inode: owner
 563 *	@goal: preferred place for allocation
  564 *	@indirect_blks: the number of blocks that need to be allocated for
  565 *			indirect blocks
  566 *	@blks:	number of blocks that need to be allocated for direct blocks
 567 *	@new_blocks: on return it will store the new block numbers for
 568 *	the indirect blocks(if needed) and the first direct block,
 569 *	@err: here we store the error value
 570 *
 571 *	return the number of direct blocks allocated
 572 */
 573static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 574			ext3_fsblk_t goal, int indirect_blks, int blks,
 575			ext3_fsblk_t new_blocks[4], int *err)
 576{
 577	int target, i;
 578	unsigned long count = 0;
 579	int index = 0;
 580	ext3_fsblk_t current_block = 0;
 581	int ret = 0;
 582
 583	/*
 584	 * Here we try to allocate the requested multiple blocks at once,
 585	 * on a best-effort basis.
 586	 * To build a branch, we should allocate blocks for
  587	 * the indirect blocks (if not allocated yet), and at least
  588	 * the first direct block of this branch.  That's the
  589	 * minimum number of blocks we need to allocate (required).
 590	 */
 591	target = blks + indirect_blks;
 592
 593	while (1) {
 594		count = target;
 595		/* allocating blocks for indirect blocks and direct blocks */
 596		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
 597		if (*err)
 598			goto failed_out;
 599
 600		target -= count;
 601		/* allocate blocks for indirect blocks */
 602		while (index < indirect_blks && count) {
 603			new_blocks[index++] = current_block++;
 604			count--;
 605		}
 606
 607		if (count > 0)
 608			break;
 609	}
 610
 611	/* save the new block number for the first direct block */
 612	new_blocks[index] = current_block;
 613
 614	/* total number of blocks allocated for direct blocks */
 615	ret = count;
 616	*err = 0;
 617	return ret;
 618failed_out:
 619	for (i = 0; i <index; i++)
 620		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 621	return ret;
 622}
 623
 624/**
 625 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 626 *	@handle: handle for this transaction
 627 *	@inode: owner
 628 *	@indirect_blks: number of allocated indirect blocks
 629 *	@blks: number of allocated direct blocks
 630 *	@goal: preferred place for allocation
 631 *	@offsets: offsets (in the blocks) to store the pointers to next.
 632 *	@branch: place to store the chain in.
 633 *
 634 *	This function allocates blocks, zeroes out all but the last one,
 635 *	links them into chain and (if we are synchronous) writes them to disk.
 636 *	In other words, it prepares a branch that can be spliced onto the
 637 *	inode. It stores the information about that chain in the branch[], in
 638 *	the same format as ext3_get_branch() would do. We are calling it after
 639 *	we had read the existing part of chain and partial points to the last
 640 *	triple of that (one with zero ->key). Upon the exit we have the same
 641 *	picture as after the successful ext3_get_block(), except that in one
 642 *	place chain is disconnected - *branch->p is still zero (we did not
 643 *	set the last link), but branch->key contains the number that should
 644 *	be placed into *branch->p to fill that gap.
 645 *
 646 *	If allocation fails we free all blocks we've allocated (and forget
  647 *	their buffer_heads) and return the error value from the failed
 648 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 649 *	as described above and return 0.
 650 */
 651static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 652			int indirect_blks, int *blks, ext3_fsblk_t goal,
 653			int *offsets, Indirect *branch)
 654{
 655	int blocksize = inode->i_sb->s_blocksize;
 656	int i, n = 0;
 657	int err = 0;
 658	struct buffer_head *bh;
 659	int num;
 660	ext3_fsblk_t new_blocks[4];
 661	ext3_fsblk_t current_block;
 662
 663	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
 664				*blks, new_blocks, &err);
 665	if (err)
 666		return err;
 667
 668	branch[0].key = cpu_to_le32(new_blocks[0]);
 669	/*
 670	 * metadata blocks and data blocks are allocated.
 671	 */
 672	for (n = 1; n <= indirect_blks;  n++) {
 673		/*
 674		 * Get buffer_head for parent block, zero it out
 675		 * and set the pointer to new one, then send
 676		 * parent to disk.
 677		 */
 678		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
 679		branch[n].bh = bh;
 680		lock_buffer(bh);
 681		BUFFER_TRACE(bh, "call get_create_access");
 682		err = ext3_journal_get_create_access(handle, bh);
 683		if (err) {
 684			unlock_buffer(bh);
 685			brelse(bh);
 686			goto failed;
 687		}
 688
 689		memset(bh->b_data, 0, blocksize);
 690		branch[n].p = (__le32 *) bh->b_data + offsets[n];
 691		branch[n].key = cpu_to_le32(new_blocks[n]);
 692		*branch[n].p = branch[n].key;
 693		if ( n == indirect_blks) {
 694			current_block = new_blocks[n];
 695			/*
 696			 * End of chain, update the last new metablock of
  697			 * the chain to point to the newly allocated
  698			 * data block numbers
 699			 */
 700			for (i=1; i < num; i++)
 701				*(branch[n].p + i) = cpu_to_le32(++current_block);
 702		}
 703		BUFFER_TRACE(bh, "marking uptodate");
 704		set_buffer_uptodate(bh);
 705		unlock_buffer(bh);
 706
 707		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
 708		err = ext3_journal_dirty_metadata(handle, bh);
 709		if (err)
 710			goto failed;
 711	}
 712	*blks = num;
 713	return err;
 714failed:
 715	/* Allocation failed, free what we already allocated */
 716	for (i = 1; i <= n ; i++) {
 717		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 718		ext3_journal_forget(handle, branch[i].bh);
 719	}
 720	for (i = 0; i <indirect_blks; i++)
 721		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 722
 723	ext3_free_blocks(handle, inode, new_blocks[i], num);
 724
 725	return err;
 726}
 727
 728/**
 729 * ext3_splice_branch - splice the allocated branch onto inode.
 730 * @handle: handle for this transaction
 731 * @inode: owner
 732 * @block: (logical) number of block we are adding
 733 * @where: location of missing link
 734 * @num:   number of indirect blocks we are adding
 735 * @blks:  number of direct blocks we are adding
 736 *
 737 * This function fills the missing link and does all housekeeping needed in
 738 * inode (->i_blocks, etc.). In case of success we end up with the full
 739 * chain to new block and return 0.
 740 */
 741static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 742			long block, Indirect *where, int num, int blks)
 743{
 744	int i;
 745	int err = 0;
 746	struct ext3_block_alloc_info *block_i;
 747	ext3_fsblk_t current_block;
 748	struct ext3_inode_info *ei = EXT3_I(inode);
 749	struct timespec now;
 750
 751	block_i = ei->i_block_alloc_info;
 752	/*
 753	 * If we're splicing into a [td]indirect block (as opposed to the
 754	 * inode) then we need to get write access to the [td]indirect block
 755	 * before the splice.
 756	 */
 757	if (where->bh) {
 758		BUFFER_TRACE(where->bh, "get_write_access");
 759		err = ext3_journal_get_write_access(handle, where->bh);
 760		if (err)
 761			goto err_out;
 762	}
 763	/* That's it */
 764
 765	*where->p = where->key;
 766
 767	/*
  768	 * Update the host buffer_head or inode to point to the just-allocated
  769	 * direct blocks
 770	 */
 771	if (num == 0 && blks > 1) {
 772		current_block = le32_to_cpu(where->key) + 1;
 773		for (i = 1; i < blks; i++)
 774			*(where->p + i ) = cpu_to_le32(current_block++);
 775	}
 776
 777	/*
 778	 * update the most recently allocated logical & physical block
  779	 * in i_block_alloc_info, to help find the proper goal block for the next
 780	 * allocation
 781	 */
 782	if (block_i) {
 783		block_i->last_alloc_logical_block = block + blks - 1;
 784		block_i->last_alloc_physical_block =
 785				le32_to_cpu(where[num].key) + blks - 1;
 786	}
 787
 788	/* We are done with atomic stuff, now do the rest of housekeeping */
 789	now = CURRENT_TIME_SEC;
 790	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
 791		inode->i_ctime = now;
 792		ext3_mark_inode_dirty(handle, inode);
 793	}
 794	/* ext3_mark_inode_dirty already updated i_sync_tid */
 795	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 796
 797	/* had we spliced it onto indirect block? */
 798	if (where->bh) {
 799		/*
 800		 * If we spliced it onto an indirect block, we haven't
 801		 * altered the inode.  Note however that if it is being spliced
 802		 * onto an indirect block at the very end of the file (the
 803		 * file is growing) then we *will* alter the inode to reflect
 804		 * the new i_size.  But that is not done here - it is done in
 805		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
 806		 */
 807		jbd_debug(5, "splicing indirect only\n");
 808		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
 809		err = ext3_journal_dirty_metadata(handle, where->bh);
 810		if (err)
 811			goto err_out;
 812	} else {
 813		/*
 814		 * OK, we spliced it into the inode itself on a direct block.
 815		 * Inode was dirtied above.
 816		 */
 817		jbd_debug(5, "splicing direct\n");
 818	}
 819	return err;
 820
 821err_out:
 822	for (i = 1; i <= num; i++) {
 823		BUFFER_TRACE(where[i].bh, "call journal_forget");
 824		ext3_journal_forget(handle, where[i].bh);
 825		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
 826	}
 827	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
 828
 829	return err;
 830}
 831
 832/*
 833 * Allocation strategy is simple: if we have to allocate something, we will
 834 * have to go the whole way to leaf. So let's do it before attaching anything
 835 * to tree, set linkage between the newborn blocks, write them if sync is
 836 * required, recheck the path, free and repeat if check fails, otherwise
 837 * set the last missing link (that will protect us from any truncate-generated
 838 * removals - all blocks on the path are immune now) and possibly force the
 839 * write on the parent block.
 840 * That has a nice additional property: no special recovery from the failed
 841 * allocations is needed - we simply release blocks and do not touch anything
 842 * reachable from inode.
 843 *
 844 * `handle' can be NULL if create == 0.
 845 *
 846 * The BKL may not be held on entry here.  Be sure to take it early.
 847 * return > 0, # of blocks mapped or allocated.
 848 * return = 0, if plain lookup failed.
 849 * return < 0, error case.
 850 */
 851int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 852		sector_t iblock, unsigned long maxblocks,
 853		struct buffer_head *bh_result,
 854		int create)
 855{
 856	int err = -EIO;
 857	int offsets[4];
 858	Indirect chain[4];
 859	Indirect *partial;
 860	ext3_fsblk_t goal;
 861	int indirect_blks;
 862	int blocks_to_boundary = 0;
 863	int depth;
 864	struct ext3_inode_info *ei = EXT3_I(inode);
 865	int count = 0;
 866	ext3_fsblk_t first_block = 0;
 867
 868
 869	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
 870	J_ASSERT(handle != NULL || create == 0);
 871	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 872
 873	if (depth == 0)
 874		goto out;
 875
 876	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 877
 878	/* Simplest case - block found, no allocation needed */
 879	if (!partial) {
 880		first_block = le32_to_cpu(chain[depth - 1].key);
 881		clear_buffer_new(bh_result);
 882		count++;
  883		/* map more blocks */
 884		while (count < maxblocks && count <= blocks_to_boundary) {
 885			ext3_fsblk_t blk;
 886
 887			if (!verify_chain(chain, chain + depth - 1)) {
 888				/*
 889				 * Indirect block might be removed by
 890				 * truncate while we were reading it.
 891				 * Handling of that case: forget what we've
 892				 * got now. Flag the err as EAGAIN, so it
 893				 * will reread.
 894				 */
 895				err = -EAGAIN;
 896				count = 0;
 897				break;
 898			}
 899			blk = le32_to_cpu(*(chain[depth-1].p + count));
 900
 901			if (blk == first_block + count)
 902				count++;
 903			else
 904				break;
 905		}
 906		if (err != -EAGAIN)
 907			goto got_it;
 908	}
 909
 910	/* Next simple case - plain lookup or failed read of indirect block */
 911	if (!create || err == -EIO)
 912		goto cleanup;
 913
 914	/*
 915	 * Block out ext3_truncate while we alter the tree
 916	 */
 917	mutex_lock(&ei->truncate_mutex);
 918
 919	/*
 920	 * If the indirect block is missing while we are reading
  921	 * the chain (ext3_get_branch() returns -EAGAIN in err), or
  922	 * if the chain has been changed after we grabbed the semaphore
  923	 * (either because another process truncated this branch, or
  924	 * another get_block allocated this branch), re-grab the chain to see if
  925	 * the requested block has been allocated or not.
 926	 *
 927	 * Since we already block the truncate/other get_block
 928	 * at this point, we will have the current copy of the chain when we
 929	 * splice the branch into the tree.
 930	 */
 931	if (err == -EAGAIN || !verify_chain(chain, partial)) {
 932		while (partial > chain) {
 933			brelse(partial->bh);
 934			partial--;
 935		}
 936		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 937		if (!partial) {
 938			count++;
 939			mutex_unlock(&ei->truncate_mutex);
 940			if (err)
 941				goto cleanup;
 942			clear_buffer_new(bh_result);
 943			goto got_it;
 944		}
 945	}
 946
 947	/*
 948	 * Okay, we need to do block allocation.  Lazily initialize the block
 949	 * allocation info here if necessary
 950	*/
 951	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
 952		ext3_init_block_alloc_info(inode);
 953
 954	goal = ext3_find_goal(inode, iblock, partial);
 955
  956	/* the number of blocks we need to allocate for [d,t]indirect blocks */
 957	indirect_blks = (chain + depth) - partial - 1;
 958
 959	/*
  960	 * Next look up the indirect map to count the total number of
 961	 * direct blocks to allocate for this branch.
 962	 */
 963	count = ext3_blks_to_allocate(partial, indirect_blks,
 964					maxblocks, blocks_to_boundary);
 965	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 966				offsets + (partial - chain), partial);
 967
 968	/*
 969	 * The ext3_splice_branch call will free and forget any buffers
 970	 * on the new chain if there is a failure, but that risks using
 971	 * up transaction credits, especially for bitmaps where the
 972	 * credits cannot be returned.  Can we handle this somehow?  We
 973	 * may need to return -EAGAIN upwards in the worst case.  --sct
 974	 */
 975	if (!err)
 976		err = ext3_splice_branch(handle, inode, iblock,
 977					partial, indirect_blks, count);
 978	mutex_unlock(&ei->truncate_mutex);
 979	if (err)
 980		goto cleanup;
 981
 982	set_buffer_new(bh_result);
 983got_it:
 984	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 985	if (count > blocks_to_boundary)
 986		set_buffer_boundary(bh_result);
 987	err = count;
 988	/* Clean up and exit */
 989	partial = chain + depth - 1;	/* the whole chain */
 990cleanup:
 991	while (partial > chain) {
 992		BUFFER_TRACE(partial->bh, "call brelse");
 993		brelse(partial->bh);
 994		partial--;
 995	}
 996	BUFFER_TRACE(bh_result, "returned");
 997out:
 998	trace_ext3_get_blocks_exit(inode, iblock,
 999				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
1000				   count, err);
1001	return err;
1002}
1003
1004/* Maximum number of blocks we map for direct IO at once. */
1005#define DIO_MAX_BLOCKS 4096
1006/*
1007 * Number of credits we need for writing DIO_MAX_BLOCKS:
1008 * We need sb + group descriptor + bitmap + inode -> 4
1009 * For B blocks with A block pointers per block we need:
1010 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
1011 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
1012 */
1013#define DIO_CREDITS 25
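/*
 * Plugging the numbers from the comment above in:
 * 1 + (4096/256/256 + 2) + (4096/256 + 2) = 1 + 2 + 18 = 21 blocks of
 * indirect metadata, plus the 4 for sb/group descriptor/bitmap/inode,
 * gives the 25 credits reserved here.
 */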
1014
1015static int ext3_get_block(struct inode *inode, sector_t iblock,
1016			struct buffer_head *bh_result, int create)
1017{
1018	handle_t *handle = ext3_journal_current_handle();
1019	int ret = 0, started = 0;
1020	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1021
1022	if (create && !handle) {	/* Direct IO write... */
1023		if (max_blocks > DIO_MAX_BLOCKS)
1024			max_blocks = DIO_MAX_BLOCKS;
1025		handle = ext3_journal_start(inode, DIO_CREDITS +
1026				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
1027		if (IS_ERR(handle)) {
1028			ret = PTR_ERR(handle);
1029			goto out;
1030		}
1031		started = 1;
1032	}
1033
1034	ret = ext3_get_blocks_handle(handle, inode, iblock,
1035					max_blocks, bh_result, create);
1036	if (ret > 0) {
1037		bh_result->b_size = (ret << inode->i_blkbits);
1038		ret = 0;
1039	}
1040	if (started)
1041		ext3_journal_stop(handle);
1042out:
1043	return ret;
1044}
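/*
 * ext3_get_block() is the get_block_t callback handed to the generic
 * buffer/page helpers used elsewhere in this file (for example
 * __block_write_begin(), block_write_full_page() and generic_block_bmap()
 * below).  On entry bh_result->b_size tells us how many blocks the caller
 * would like mapped; on a successful multi-block mapping it is rewritten
 * to cover the number of blocks actually mapped.
 */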
1045
1046int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1047		u64 start, u64 len)
1048{
1049	return generic_block_fiemap(inode, fieinfo, start, len,
1050				    ext3_get_block);
1051}
1052
1053/*
1054 * `handle' can be NULL if create is zero
1055 */
1056struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
1057				long block, int create, int *errp)
1058{
1059	struct buffer_head dummy;
1060	int fatal = 0, err;
1061
1062	J_ASSERT(handle != NULL || create == 0);
1063
1064	dummy.b_state = 0;
1065	dummy.b_blocknr = -1000;
1066	buffer_trace_init(&dummy.b_history);
1067	err = ext3_get_blocks_handle(handle, inode, block, 1,
1068					&dummy, create);
1069	/*
 1070	 * ext3_get_blocks_handle() returns the number of blocks
 1071	 * mapped, or 0 in the case of a HOLE.
1072	 */
1073	if (err > 0) {
1074		if (err > 1)
1075			WARN_ON(1);
1076		err = 0;
1077	}
1078	*errp = err;
1079	if (!err && buffer_mapped(&dummy)) {
1080		struct buffer_head *bh;
1081		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1082		if (!bh) {
1083			*errp = -EIO;
1084			goto err;
1085		}
1086		if (buffer_new(&dummy)) {
1087			J_ASSERT(create != 0);
1088			J_ASSERT(handle != NULL);
1089
1090			/*
1091			 * Now that we do not always journal data, we should
1092			 * keep in mind whether this should always journal the
1093			 * new buffer as metadata.  For now, regular file
1094			 * writes use ext3_get_block instead, so it's not a
1095			 * problem.
1096			 */
1097			lock_buffer(bh);
1098			BUFFER_TRACE(bh, "call get_create_access");
1099			fatal = ext3_journal_get_create_access(handle, bh);
1100			if (!fatal && !buffer_uptodate(bh)) {
1101				memset(bh->b_data,0,inode->i_sb->s_blocksize);
1102				set_buffer_uptodate(bh);
1103			}
1104			unlock_buffer(bh);
1105			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1106			err = ext3_journal_dirty_metadata(handle, bh);
1107			if (!fatal)
1108				fatal = err;
1109		} else {
1110			BUFFER_TRACE(bh, "not a new buffer");
1111		}
1112		if (fatal) {
1113			*errp = fatal;
1114			brelse(bh);
1115			bh = NULL;
1116		}
1117		return bh;
1118	}
1119err:
1120	return NULL;
1121}
1122
1123struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1124			       int block, int create, int *err)
1125{
1126	struct buffer_head * bh;
1127
1128	bh = ext3_getblk(handle, inode, block, create, err);
1129	if (!bh)
1130		return bh;
1131	if (bh_uptodate_or_lock(bh))
1132		return bh;
1133	get_bh(bh);
1134	bh->b_end_io = end_buffer_read_sync;
1135	submit_bh(READ | REQ_META | REQ_PRIO, bh);
1136	wait_on_buffer(bh);
1137	if (buffer_uptodate(bh))
1138		return bh;
1139	put_bh(bh);
1140	*err = -EIO;
1141	return NULL;
1142}
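/*
 * A typical caller (the directory code, for example) uses ext3_bread()
 * roughly like the sketch below, where "dir" and "n" stand for whatever
 * inode and logical block the caller cares about:
 *
 *	bh = ext3_bread(handle, dir, n, 0, &err);
 *	if (!bh)
 *		return err;
 *
 * A NULL return with *err == 0 simply means the block is a hole.
 */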
1143
1144static int walk_page_buffers(	handle_t *handle,
1145				struct buffer_head *head,
1146				unsigned from,
1147				unsigned to,
1148				int *partial,
1149				int (*fn)(	handle_t *handle,
1150						struct buffer_head *bh))
1151{
1152	struct buffer_head *bh;
1153	unsigned block_start, block_end;
1154	unsigned blocksize = head->b_size;
1155	int err, ret = 0;
1156	struct buffer_head *next;
1157
1158	for (	bh = head, block_start = 0;
1159		ret == 0 && (bh != head || !block_start);
1160		block_start = block_end, bh = next)
1161	{
1162		next = bh->b_this_page;
1163		block_end = block_start + blocksize;
1164		if (block_end <= from || block_start >= to) {
1165			if (partial && !buffer_uptodate(bh))
1166				*partial = 1;
1167			continue;
1168		}
1169		err = (*fn)(handle, bh);
1170		if (!ret)
1171			ret = err;
1172	}
1173	return ret;
1174}
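/*
 * walk_page_buffers() simply applies "fn" to every buffer in the page
 * that overlaps the byte range [from, to) and reports the first error.
 * The write_begin/write_end/writepage paths below reuse it with different
 * callbacks: do_journal_get_write_access(), journal_dirty_data_fn(),
 * write_end_fn(), bget_one()/bput_one() and buffer_unmapped().
 */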
1175
1176/*
1177 * To preserve ordering, it is essential that the hole instantiation and
1178 * the data write be encapsulated in a single transaction.  We cannot
1179 * close off a transaction and start a new one between the ext3_get_block()
 1180 * and the commit_write().  So the journal_start at the start of
 1181 * prepare_write() is the right place to do it.
1182 *
1183 * Also, this function can nest inside ext3_writepage() ->
1184 * block_write_full_page(). In that case, we *know* that ext3_writepage()
1185 * has generated enough buffer credits to do the whole page.  So we won't
1186 * block on the journal in that case, which is good, because the caller may
1187 * be PF_MEMALLOC.
1188 *
1189 * By accident, ext3 can be reentered when a transaction is open via
1190 * quota file writes.  If we were to commit the transaction while thus
1191 * reentered, there can be a deadlock - we would be holding a quota
1192 * lock, and the commit would never complete if another thread had a
1193 * transaction open and was blocking on the quota lock - a ranking
1194 * violation.
1195 *
1196 * So what we do is to rely on the fact that journal_stop/journal_start
1197 * will _not_ run commit under these circumstances because handle->h_ref
1198 * is elevated.  We'll still have enough credits for the tiny quotafile
1199 * write.
1200 */
1201static int do_journal_get_write_access(handle_t *handle,
1202					struct buffer_head *bh)
1203{
1204	int dirty = buffer_dirty(bh);
1205	int ret;
1206
1207	if (!buffer_mapped(bh) || buffer_freed(bh))
1208		return 0;
1209	/*
1210	 * __block_prepare_write() could have dirtied some buffers. Clean
1211	 * the dirty bit as jbd2_journal_get_write_access() could complain
1212	 * otherwise about fs integrity issues. Setting of the dirty bit
1213	 * by __block_prepare_write() isn't a real problem here as we clear
1214	 * the bit before releasing a page lock and thus writeback cannot
1215	 * ever write the buffer.
1216	 */
1217	if (dirty)
1218		clear_buffer_dirty(bh);
1219	ret = ext3_journal_get_write_access(handle, bh);
1220	if (!ret && dirty)
1221		ret = ext3_journal_dirty_metadata(handle, bh);
1222	return ret;
1223}
1224
1225/*
1226 * Truncate blocks that were not used by write. We have to truncate the
1227 * pagecache as well so that corresponding buffers get properly unmapped.
1228 */
1229static void ext3_truncate_failed_write(struct inode *inode)
1230{
1231	truncate_inode_pages(inode->i_mapping, inode->i_size);
1232	ext3_truncate(inode);
1233}
1234
1235/*
1236 * Truncate blocks that were not used by direct IO write. We have to zero out
1237 * the last file block as well because direct IO might have written to it.
1238 */
1239static void ext3_truncate_failed_direct_write(struct inode *inode)
1240{
1241	ext3_block_truncate_page(inode, inode->i_size);
1242	ext3_truncate(inode);
1243}
1244
1245static int ext3_write_begin(struct file *file, struct address_space *mapping,
1246				loff_t pos, unsigned len, unsigned flags,
1247				struct page **pagep, void **fsdata)
1248{
1249	struct inode *inode = mapping->host;
1250	int ret;
1251	handle_t *handle;
1252	int retries = 0;
1253	struct page *page;
1254	pgoff_t index;
1255	unsigned from, to;
1256	/* Reserve one block more for addition to orphan list in case
 1257	 * we allocate blocks but the write fails for some reason */
1258	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
1259
1260	trace_ext3_write_begin(inode, pos, len, flags);
1261
1262	index = pos >> PAGE_CACHE_SHIFT;
1263	from = pos & (PAGE_CACHE_SIZE - 1);
1264	to = from + len;
1265
1266retry:
1267	page = grab_cache_page_write_begin(mapping, index, flags);
1268	if (!page)
1269		return -ENOMEM;
1270	*pagep = page;
1271
1272	handle = ext3_journal_start(inode, needed_blocks);
1273	if (IS_ERR(handle)) {
1274		unlock_page(page);
1275		page_cache_release(page);
1276		ret = PTR_ERR(handle);
1277		goto out;
1278	}
1279	ret = __block_write_begin(page, pos, len, ext3_get_block);
1280	if (ret)
1281		goto write_begin_failed;
1282
1283	if (ext3_should_journal_data(inode)) {
1284		ret = walk_page_buffers(handle, page_buffers(page),
1285				from, to, NULL, do_journal_get_write_access);
1286	}
1287write_begin_failed:
1288	if (ret) {
1289		/*
1290		 * block_write_begin may have instantiated a few blocks
1291		 * outside i_size.  Trim these off again. Don't need
1292		 * i_size_read because we hold i_mutex.
1293		 *
1294		 * Add inode to orphan list in case we crash before truncate
1295		 * finishes. Do this only if ext3_can_truncate() agrees so
1296		 * that orphan processing code is happy.
1297		 */
1298		if (pos + len > inode->i_size && ext3_can_truncate(inode))
1299			ext3_orphan_add(handle, inode);
1300		ext3_journal_stop(handle);
1301		unlock_page(page);
1302		page_cache_release(page);
1303		if (pos + len > inode->i_size)
1304			ext3_truncate_failed_write(inode);
1305	}
1306	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1307		goto retry;
1308out:
1309	return ret;
1310}
1311
1312
1313int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1314{
1315	int err = journal_dirty_data(handle, bh);
1316	if (err)
1317		ext3_journal_abort_handle(__func__, __func__,
1318						bh, handle, err);
1319	return err;
1320}
1321
1322/* For ordered writepage and write_end functions */
1323static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1324{
1325	/*
1326	 * Write could have mapped the buffer but it didn't copy the data in
1327	 * yet. So avoid filing such buffer into a transaction.
1328	 */
1329	if (buffer_mapped(bh) && buffer_uptodate(bh))
1330		return ext3_journal_dirty_data(handle, bh);
1331	return 0;
1332}
1333
1334/* For write_end() in data=journal mode */
1335static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1336{
1337	if (!buffer_mapped(bh) || buffer_freed(bh))
1338		return 0;
1339	set_buffer_uptodate(bh);
1340	return ext3_journal_dirty_metadata(handle, bh);
1341}
1342
1343/*
1344 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
1345 * for the whole page but later we failed to copy the data in. Update inode
1346 * size according to what we managed to copy. The rest is going to be
1347 * truncated in write_end function.
1348 */
1349static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1350{
1351	/* What matters to us is i_disksize. We don't write i_size anywhere */
1352	if (pos + copied > inode->i_size)
1353		i_size_write(inode, pos + copied);
1354	if (pos + copied > EXT3_I(inode)->i_disksize) {
1355		EXT3_I(inode)->i_disksize = pos + copied;
1356		mark_inode_dirty(inode);
1357	}
1358}
1359
1360/*
 1361 * We need to pick up the new inode size which generic_commit_write gave us.
 1362 * `file' can be NULL - eg, when called from page_symlink().
 1363 *
 1364 * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
 1365 * buffers are managed internally.
1366 */
1367static int ext3_ordered_write_end(struct file *file,
1368				struct address_space *mapping,
1369				loff_t pos, unsigned len, unsigned copied,
1370				struct page *page, void *fsdata)
1371{
1372	handle_t *handle = ext3_journal_current_handle();
1373	struct inode *inode = file->f_mapping->host;
1374	unsigned from, to;
1375	int ret = 0, ret2;
1376
1377	trace_ext3_ordered_write_end(inode, pos, len, copied);
1378	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1379
1380	from = pos & (PAGE_CACHE_SIZE - 1);
1381	to = from + copied;
1382	ret = walk_page_buffers(handle, page_buffers(page),
1383		from, to, NULL, journal_dirty_data_fn);
1384
1385	if (ret == 0)
1386		update_file_sizes(inode, pos, copied);
1387	/*
1388	 * There may be allocated blocks outside of i_size because
1389	 * we failed to copy some data. Prepare for truncate.
1390	 */
1391	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1392		ext3_orphan_add(handle, inode);
1393	ret2 = ext3_journal_stop(handle);
1394	if (!ret)
1395		ret = ret2;
1396	unlock_page(page);
1397	page_cache_release(page);
1398
1399	if (pos + len > inode->i_size)
1400		ext3_truncate_failed_write(inode);
1401	return ret ? ret : copied;
1402}
1403
1404static int ext3_writeback_write_end(struct file *file,
1405				struct address_space *mapping,
1406				loff_t pos, unsigned len, unsigned copied,
1407				struct page *page, void *fsdata)
1408{
1409	handle_t *handle = ext3_journal_current_handle();
1410	struct inode *inode = file->f_mapping->host;
1411	int ret;
1412
1413	trace_ext3_writeback_write_end(inode, pos, len, copied);
1414	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1415	update_file_sizes(inode, pos, copied);
1416	/*
1417	 * There may be allocated blocks outside of i_size because
1418	 * we failed to copy some data. Prepare for truncate.
1419	 */
1420	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1421		ext3_orphan_add(handle, inode);
1422	ret = ext3_journal_stop(handle);
1423	unlock_page(page);
1424	page_cache_release(page);
1425
1426	if (pos + len > inode->i_size)
1427		ext3_truncate_failed_write(inode);
1428	return ret ? ret : copied;
1429}
1430
1431static int ext3_journalled_write_end(struct file *file,
1432				struct address_space *mapping,
1433				loff_t pos, unsigned len, unsigned copied,
1434				struct page *page, void *fsdata)
1435{
1436	handle_t *handle = ext3_journal_current_handle();
1437	struct inode *inode = mapping->host;
1438	struct ext3_inode_info *ei = EXT3_I(inode);
1439	int ret = 0, ret2;
1440	int partial = 0;
1441	unsigned from, to;
1442
1443	trace_ext3_journalled_write_end(inode, pos, len, copied);
1444	from = pos & (PAGE_CACHE_SIZE - 1);
1445	to = from + len;
1446
1447	if (copied < len) {
1448		if (!PageUptodate(page))
1449			copied = 0;
1450		page_zero_new_buffers(page, from + copied, to);
1451		to = from + copied;
1452	}
1453
1454	ret = walk_page_buffers(handle, page_buffers(page), from,
1455				to, &partial, write_end_fn);
1456	if (!partial)
1457		SetPageUptodate(page);
1458
1459	if (pos + copied > inode->i_size)
1460		i_size_write(inode, pos + copied);
1461	/*
1462	 * There may be allocated blocks outside of i_size because
1463	 * we failed to copy some data. Prepare for truncate.
1464	 */
1465	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1466		ext3_orphan_add(handle, inode);
1467	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1468	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
1469	if (inode->i_size > ei->i_disksize) {
1470		ei->i_disksize = inode->i_size;
1471		ret2 = ext3_mark_inode_dirty(handle, inode);
1472		if (!ret)
1473			ret = ret2;
1474	}
1475
1476	ret2 = ext3_journal_stop(handle);
1477	if (!ret)
1478		ret = ret2;
1479	unlock_page(page);
1480	page_cache_release(page);
1481
1482	if (pos + len > inode->i_size)
1483		ext3_truncate_failed_write(inode);
1484	return ret ? ret : copied;
1485}
1486
1487/*
1488 * bmap() is special.  It gets used by applications such as lilo and by
1489 * the swapper to find the on-disk block of a specific piece of data.
1490 *
1491 * Naturally, this is dangerous if the block concerned is still in the
1492 * journal.  If somebody makes a swapfile on an ext3 data-journaling
1493 * filesystem and enables swap, then they may get a nasty shock when the
1494 * data getting swapped to that swapfile suddenly gets overwritten by
1495 * the original zero's written out previously to the journal and
1496 * awaiting writeback in the kernel's buffer cache.
1497 *
1498 * So, if we see any bmap calls here on a modified, data-journaled file,
1499 * take extra steps to flush any blocks which might be in the cache.
1500 */
1501static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1502{
1503	struct inode *inode = mapping->host;
1504	journal_t *journal;
1505	int err;
1506
1507	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
1508		/*
1509		 * This is a REALLY heavyweight approach, but the use of
1510		 * bmap on dirty files is expected to be extremely rare:
1511		 * only if we run lilo or swapon on a freshly made file
1512		 * do we expect this to happen.
1513		 *
1514		 * (bmap requires CAP_SYS_RAWIO so this does not
1515		 * represent an unprivileged user DOS attack --- we'd be
1516		 * in trouble if mortal users could trigger this path at
1517		 * will.)
1518		 *
1519		 * NB. EXT3_STATE_JDATA is not set on files other than
1520		 * regular files.  If somebody wants to bmap a directory
1521		 * or symlink and gets confused because the buffer
1522		 * hasn't yet been flushed to disk, they deserve
1523		 * everything they get.
1524		 */
1525
1526		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
1527		journal = EXT3_JOURNAL(inode);
1528		journal_lock_updates(journal);
1529		err = journal_flush(journal);
1530		journal_unlock_updates(journal);
1531
1532		if (err)
1533			return 0;
1534	}
1535
1536	return generic_block_bmap(mapping,block,ext3_get_block);
1537}
1538
1539static int bget_one(handle_t *handle, struct buffer_head *bh)
1540{
1541	get_bh(bh);
1542	return 0;
1543}
1544
1545static int bput_one(handle_t *handle, struct buffer_head *bh)
1546{
1547	put_bh(bh);
1548	return 0;
1549}
1550
1551static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1552{
1553	return !buffer_mapped(bh);
1554}
1555
1556/*
1557 * Note that we always start a transaction even if we're not journalling
1558 * data.  This is to preserve ordering: any hole instantiation within
1559 * __block_write_full_page -> ext3_get_block() should be journalled
1560 * along with the data so we don't crash and then get metadata which
1561 * refers to old data.
1562 *
1563 * In all journalling modes block_write_full_page() will start the I/O.
1564 *
1565 * Problem:
1566 *
1567 *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1568 *		ext3_writepage()
1569 *
1570 * Similar for:
1571 *
1572 *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1573 *
1574 * Same applies to ext3_get_block().  We will deadlock on various things like
1575 * lock_journal and i_truncate_mutex.
1576 *
1577 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1578 * allocations fail.
1579 *
1580 * 16May01: If we're reentered then journal_current_handle() will be
1581 *	    non-zero. We simply *return*.
1582 *
1583 * 1 July 2001: @@@ FIXME:
1584 *   In journalled data mode, a data buffer may be metadata against the
1585 *   current transaction.  But the same file is part of a shared mapping
1586 *   and someone does a writepage() on it.
1587 *
1588 *   We will move the buffer onto the async_data list, but *after* it has
1589 *   been dirtied. So there's a small window where we have dirty data on
1590 *   BJ_Metadata.
1591 *
1592 *   Note that this only applies to the last partial page in the file.  The
1593 *   bit which block_write_full_page() uses prepare/commit for.  (That's
1594 *   broken code anyway: it's wrong for msync()).
1595 *
1596 *   It's a rare case: affects the final partial page, for journalled data
 1597 *   where the file is subject to both write() and writepage() in the same
 1598 *   transaction.  To fix it we'll need a custom block_write_full_page().
1599 *   We'll probably need that anyway for journalling writepage() output.
1600 *
1601 * We don't honour synchronous mounts for writepage().  That would be
1602 * disastrous.  Any write() or metadata operation will sync the fs for
1603 * us.
1604 *
1605 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1606 * we don't need to open a transaction here.
1607 */
1608static int ext3_ordered_writepage(struct page *page,
1609				struct writeback_control *wbc)
1610{
1611	struct inode *inode = page->mapping->host;
1612	struct buffer_head *page_bufs;
1613	handle_t *handle = NULL;
1614	int ret = 0;
1615	int err;
1616
1617	J_ASSERT(PageLocked(page));
1618	/*
1619	 * We don't want to warn for emergency remount. The condition is
1620	 * ordered to avoid dereferencing inode->i_sb in non-error case to
1621	 * avoid slow-downs.
1622	 */
1623	WARN_ON_ONCE(IS_RDONLY(inode) &&
1624		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1625
1626	/*
1627	 * We give up here if we're reentered, because it might be for a
1628	 * different filesystem.
1629	 */
1630	if (ext3_journal_current_handle())
1631		goto out_fail;
1632
1633	trace_ext3_ordered_writepage(page);
1634	if (!page_has_buffers(page)) {
1635		create_empty_buffers(page, inode->i_sb->s_blocksize,
1636				(1 << BH_Dirty)|(1 << BH_Uptodate));
1637		page_bufs = page_buffers(page);
1638	} else {
1639		page_bufs = page_buffers(page);
1640		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
1641				       NULL, buffer_unmapped)) {
1642			/* Provide NULL get_block() to catch bugs if buffers
1643			 * weren't really mapped */
1644			return block_write_full_page(page, NULL, wbc);
1645		}
1646	}
1647	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1648
1649	if (IS_ERR(handle)) {
1650		ret = PTR_ERR(handle);
1651		goto out_fail;
1652	}
1653
1654	walk_page_buffers(handle, page_bufs, 0,
1655			PAGE_CACHE_SIZE, NULL, bget_one);
1656
1657	ret = block_write_full_page(page, ext3_get_block, wbc);
1658
1659	/*
1660	 * The page can become unlocked at any point now, and
1661	 * truncate can then come in and change things.  So we
1662	 * can't touch *page from now on.  But *page_bufs is
1663	 * safe due to elevated refcount.
1664	 */
1665
1666	/*
1667	 * And attach them to the current transaction.  But only if
1668	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
1669	 * and generally junk.
1670	 */
1671	if (ret == 0) {
1672		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1673					NULL, journal_dirty_data_fn);
1674		if (!ret)
1675			ret = err;
1676	}
1677	walk_page_buffers(handle, page_bufs, 0,
1678			PAGE_CACHE_SIZE, NULL, bput_one);
1679	err = ext3_journal_stop(handle);
1680	if (!ret)
1681		ret = err;
1682	return ret;
1683
1684out_fail:
1685	redirty_page_for_writepage(wbc, page);
1686	unlock_page(page);
1687	return ret;
1688}
1689
1690static int ext3_writeback_writepage(struct page *page,
1691				struct writeback_control *wbc)
1692{
1693	struct inode *inode = page->mapping->host;
1694	handle_t *handle = NULL;
1695	int ret = 0;
1696	int err;
1697
1698	J_ASSERT(PageLocked(page));
1699	/*
1700	 * We don't want to warn for emergency remount. The condition is
1701	 * ordered to avoid dereferencing inode->i_sb in the non-error case
1702	 * and so prevent slow-downs.
1703	 */
1704	WARN_ON_ONCE(IS_RDONLY(inode) &&
1705		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1706
1707	if (ext3_journal_current_handle())
1708		goto out_fail;
1709
1710	trace_ext3_writeback_writepage(page);
1711	if (page_has_buffers(page)) {
1712		if (!walk_page_buffers(NULL, page_buffers(page), 0,
1713				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
1714			/* Provide NULL get_block() to catch bugs if buffers
1715			 * weren't really mapped */
1716			return block_write_full_page(page, NULL, wbc);
1717		}
1718	}
1719
1720	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1721	if (IS_ERR(handle)) {
1722		ret = PTR_ERR(handle);
1723		goto out_fail;
1724	}
1725
1726	ret = block_write_full_page(page, ext3_get_block, wbc);
1727
1728	err = ext3_journal_stop(handle);
1729	if (!ret)
1730		ret = err;
1731	return ret;
1732
1733out_fail:
1734	redirty_page_for_writepage(wbc, page);
1735	unlock_page(page);
1736	return ret;
1737}
1738
1739static int ext3_journalled_writepage(struct page *page,
1740				struct writeback_control *wbc)
1741{
1742	struct inode *inode = page->mapping->host;
1743	handle_t *handle = NULL;
1744	int ret = 0;
1745	int err;
1746
1747	J_ASSERT(PageLocked(page));
1748	/*
1749	 * We don't want to warn for emergency remount. The condition is
1750	 * ordered to avoid dereferencing inode->i_sb in the non-error case
1751	 * and so prevent slow-downs.
1752	 */
1753	WARN_ON_ONCE(IS_RDONLY(inode) &&
1754		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1755
1756	if (ext3_journal_current_handle())
1757		goto no_write;
1758
1759	trace_ext3_journalled_writepage(page);
1760	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1761	if (IS_ERR(handle)) {
1762		ret = PTR_ERR(handle);
1763		goto no_write;
1764	}
1765
1766	if (!page_has_buffers(page) || PageChecked(page)) {
1767		/*
1768		 * It's mmapped pagecache.  Add buffers and journal it.  There
1769		 * doesn't seem much point in redirtying the page here.
1770		 */
1771		ClearPageChecked(page);
1772		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
1773					  ext3_get_block);
1774		if (ret != 0) {
1775			ext3_journal_stop(handle);
1776			goto out_unlock;
1777		}
1778		ret = walk_page_buffers(handle, page_buffers(page), 0,
1779			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1780
1781		err = walk_page_buffers(handle, page_buffers(page), 0,
1782				PAGE_CACHE_SIZE, NULL, write_end_fn);
1783		if (ret == 0)
1784			ret = err;
1785		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1786		atomic_set(&EXT3_I(inode)->i_datasync_tid,
1787			   handle->h_transaction->t_tid);
1788		unlock_page(page);
1789	} else {
1790		/*
1791		 * It may be a page full of checkpoint-mode buffers.  We don't
1792		 * really know unless we go poke around in the buffer_heads.
1793		 * But block_write_full_page will do the right thing.
1794		 */
1795		ret = block_write_full_page(page, ext3_get_block, wbc);
1796	}
1797	err = ext3_journal_stop(handle);
1798	if (!ret)
1799		ret = err;
1800out:
1801	return ret;
1802
1803no_write:
1804	redirty_page_for_writepage(wbc, page);
1805out_unlock:
1806	unlock_page(page);
1807	goto out;
1808}
1809
1810static int ext3_readpage(struct file *file, struct page *page)
1811{
1812	trace_ext3_readpage(page);
1813	return mpage_readpage(page, ext3_get_block);
1814}
1815
1816static int
1817ext3_readpages(struct file *file, struct address_space *mapping,
1818		struct list_head *pages, unsigned nr_pages)
1819{
1820	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1821}
1822
1823static void ext3_invalidatepage(struct page *page, unsigned long offset)
1824{
1825	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1826
1827	trace_ext3_invalidatepage(page, offset);
1828
1829	/*
1830	 * If it's a full truncate we just forget about the pending dirtying
1831	 */
1832	if (offset == 0)
1833		ClearPageChecked(page);
1834
1835	journal_invalidatepage(journal, page, offset);
1836}
1837
1838static int ext3_releasepage(struct page *page, gfp_t wait)
1839{
1840	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1841
1842	trace_ext3_releasepage(page);
1843	WARN_ON(PageChecked(page));
1844	if (!page_has_buffers(page))
1845		return 0;
1846	return journal_try_to_free_buffers(journal, page, wait);
1847}
1848
1849/*
1850 * If the O_DIRECT write will extend the file then add this inode to the
1851 * orphan list.  So recovery will truncate it back to the original size
1852 * if the machine crashes during the write.
1853 *
1854 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1855 * crashes then stale disk data _may_ be exposed inside the file. But the
1856 * current VFS code falls back to the buffered path in that case, so we are safe.
1857 */
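/*
 * For illustration, an extending write is bracketed roughly like this
 * (a sketch of the function below, error handling omitted):
 *
 *	ext3_orphan_add()	before the direct I/O is issued
 *	blockdev_direct_IO()	may instantiate blocks past i_size
 *	ext3_orphan_del()	afterwards, together with updating
 *				i_size/i_disksize and marking the inode dirty
 *
 * so a crash in the middle leaves the inode on the orphan list and
 * recovery truncates the surplus blocks away.
 */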
1858static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1859			const struct iovec *iov, loff_t offset,
1860			unsigned long nr_segs)
1861{
1862	struct file *file = iocb->ki_filp;
1863	struct inode *inode = file->f_mapping->host;
1864	struct ext3_inode_info *ei = EXT3_I(inode);
1865	handle_t *handle;
1866	ssize_t ret;
1867	int orphan = 0;
1868	size_t count = iov_length(iov, nr_segs);
1869	int retries = 0;
1870
1871	trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
1872
1873	if (rw == WRITE) {
1874		loff_t final_size = offset + count;
1875
1876		if (final_size > inode->i_size) {
1877			/* Credits for sb + inode write */
1878			handle = ext3_journal_start(inode, 2);
1879			if (IS_ERR(handle)) {
1880				ret = PTR_ERR(handle);
1881				goto out;
1882			}
1883			ret = ext3_orphan_add(handle, inode);
1884			if (ret) {
1885				ext3_journal_stop(handle);
1886				goto out;
1887			}
1888			orphan = 1;
1889			ei->i_disksize = inode->i_size;
1890			ext3_journal_stop(handle);
1891		}
1892	}
1893
1894retry:
1895	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
1896				 ext3_get_block);
1897	/*
1898	 * In case of error extending write may have instantiated a few
1899	 * blocks outside i_size. Trim these off again.
1900	 */
1901	if (unlikely((rw & WRITE) && ret < 0)) {
1902		loff_t isize = i_size_read(inode);
1903		loff_t end = offset + iov_length(iov, nr_segs);
1904
1905		if (end > isize)
1906			ext3_truncate_failed_direct_write(inode);
1907	}
1908	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1909		goto retry;
1910
1911	if (orphan) {
1912		int err;
1913
1914		/* Credits for sb + inode write */
1915		handle = ext3_journal_start(inode, 2);
1916		if (IS_ERR(handle)) {
1917			/* This is really bad luck. We've written the data
1918			 * but cannot extend i_size. Truncate allocated blocks
1919			 * and pretend the write failed... */
1920			ext3_truncate_failed_direct_write(inode);
1921			ret = PTR_ERR(handle);
1922			goto out;
1923		}
1924		if (inode->i_nlink)
1925			ext3_orphan_del(handle, inode);
1926		if (ret > 0) {
1927			loff_t end = offset + ret;
1928			if (end > inode->i_size) {
1929				ei->i_disksize = end;
1930				i_size_write(inode, end);
1931				/*
1932				 * We're going to return a positive `ret'
1933				 * here due to non-zero-length I/O, so there's
1934				 * no way of reporting error returns from
1935				 * ext3_mark_inode_dirty() to userspace.  So
1936				 * ignore it.
1937				 */
1938				ext3_mark_inode_dirty(handle, inode);
1939			}
1940		}
1941		err = ext3_journal_stop(handle);
1942		if (ret == 0)
1943			ret = err;
1944	}
1945out:
1946	trace_ext3_direct_IO_exit(inode, offset,
1947				iov_length(iov, nr_segs), rw, ret);
1948	return ret;
1949}
1950
1951/*
1952 * Pages can be marked dirty completely asynchronously from ext3's journalling
1953 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1954 * much here because ->set_page_dirty is called under VFS locks.  The page is
1955 * not necessarily locked.
1956 *
1957 * We cannot just dirty the page and leave attached buffers clean, because the
1958 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1959 * or jbddirty because all the journalling code will explode.
1960 *
1961 * So what we do is to mark the page "pending dirty" and next time writepage
1962 * is called, propagate that into the buffers appropriately.
1963 */
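/*
 * In other words, a sketch of how the "pending dirty" bit travels for the
 * common mmap-dirtying case:
 *
 *	set_page_dirty()		-> SetPageChecked(page)
 *	...later, during writeback...
 *	ext3_journalled_writepage()	sees PageChecked(page), clears it,
 *		and journals the buffers via do_journal_get_write_access()
 *		and write_end_fn().
 */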
1964static int ext3_journalled_set_page_dirty(struct page *page)
1965{
1966	SetPageChecked(page);
1967	return __set_page_dirty_nobuffers(page);
1968}
1969
1970static const struct address_space_operations ext3_ordered_aops = {
1971	.readpage		= ext3_readpage,
1972	.readpages		= ext3_readpages,
1973	.writepage		= ext3_ordered_writepage,
1974	.write_begin		= ext3_write_begin,
1975	.write_end		= ext3_ordered_write_end,
1976	.bmap			= ext3_bmap,
1977	.invalidatepage		= ext3_invalidatepage,
1978	.releasepage		= ext3_releasepage,
1979	.direct_IO		= ext3_direct_IO,
1980	.migratepage		= buffer_migrate_page,
1981	.is_partially_uptodate  = block_is_partially_uptodate,
1982	.error_remove_page	= generic_error_remove_page,
1983};
1984
1985static const struct address_space_operations ext3_writeback_aops = {
1986	.readpage		= ext3_readpage,
1987	.readpages		= ext3_readpages,
1988	.writepage		= ext3_writeback_writepage,
1989	.write_begin		= ext3_write_begin,
1990	.write_end		= ext3_writeback_write_end,
1991	.bmap			= ext3_bmap,
1992	.invalidatepage		= ext3_invalidatepage,
1993	.releasepage		= ext3_releasepage,
1994	.direct_IO		= ext3_direct_IO,
1995	.migratepage		= buffer_migrate_page,
1996	.is_partially_uptodate  = block_is_partially_uptodate,
1997	.error_remove_page	= generic_error_remove_page,
1998};
1999
2000static const struct address_space_operations ext3_journalled_aops = {
2001	.readpage		= ext3_readpage,
2002	.readpages		= ext3_readpages,
2003	.writepage		= ext3_journalled_writepage,
2004	.write_begin		= ext3_write_begin,
2005	.write_end		= ext3_journalled_write_end,
2006	.set_page_dirty		= ext3_journalled_set_page_dirty,
2007	.bmap			= ext3_bmap,
2008	.invalidatepage		= ext3_invalidatepage,
2009	.releasepage		= ext3_releasepage,
2010	.is_partially_uptodate  = block_is_partially_uptodate,
2011	.error_remove_page	= generic_error_remove_page,
2012};
2013
2014void ext3_set_aops(struct inode *inode)
2015{
2016	if (ext3_should_order_data(inode))
2017		inode->i_mapping->a_ops = &ext3_ordered_aops;
2018	else if (ext3_should_writeback_data(inode))
2019		inode->i_mapping->a_ops = &ext3_writeback_aops;
2020	else
2021		inode->i_mapping->a_ops = &ext3_journalled_aops;
2022}
2023
2024/*
2025 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
2026 * up to the end of the block which corresponds to `from'.
2027 * This is required during truncate. We need to physically zero the tail end
2028 * of that block so it doesn't yield old data if the file is later grown.
2029 */
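/*
 * A worked example (illustrative numbers only, assuming 4K pages and a
 * 1K block size): for from == 10000, the offset within the page is
 * 10000 & 4095 == 1808, the offset within the block is 1808 & 1023 == 784,
 * and length == 1024 - 784 == 240, so zero_user() below clears the last
 * 240 bytes of the block that contains byte 10000.
 */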
2030static int ext3_block_truncate_page(struct inode *inode, loff_t from)
2031{
2032	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2033	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2034	unsigned blocksize, iblock, length, pos;
2035	struct page *page;
2036	handle_t *handle = NULL;
2037	struct buffer_head *bh;
2038	int err = 0;
2039
2040	/* Truncated on block boundary - nothing to do */
2041	blocksize = inode->i_sb->s_blocksize;
2042	if ((from & (blocksize - 1)) == 0)
2043		return 0;
2044
2045	page = grab_cache_page(inode->i_mapping, index);
2046	if (!page)
2047		return -ENOMEM;
2048	length = blocksize - (offset & (blocksize - 1));
2049	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2050
2051	if (!page_has_buffers(page))
2052		create_empty_buffers(page, blocksize, 0);
2053
2054	/* Find the buffer that contains "offset" */
2055	bh = page_buffers(page);
2056	pos = blocksize;
2057	while (offset >= pos) {
2058		bh = bh->b_this_page;
2059		iblock++;
2060		pos += blocksize;
2061	}
2062
2063	err = 0;
2064	if (buffer_freed(bh)) {
2065		BUFFER_TRACE(bh, "freed: skip");
2066		goto unlock;
2067	}
2068
2069	if (!buffer_mapped(bh)) {
2070		BUFFER_TRACE(bh, "unmapped");
2071		ext3_get_block(inode, iblock, bh, 0);
2072		/* unmapped? It's a hole - nothing to do */
2073		if (!buffer_mapped(bh)) {
2074			BUFFER_TRACE(bh, "still unmapped");
2075			goto unlock;
2076		}
2077	}
2078
2079	/* Ok, it's mapped. Make sure it's up-to-date */
2080	if (PageUptodate(page))
2081		set_buffer_uptodate(bh);
2082
2083	if (!bh_uptodate_or_lock(bh)) {
2084		err = bh_submit_read(bh);
2085		/* Uhhuh. Read error. Complain and punt. */
2086		if (err)
2087			goto unlock;
2088	}
2089
2090	/* data=writeback mode doesn't need transaction to zero-out data */
2091	if (!ext3_should_writeback_data(inode)) {
2092		/* We journal at most one block */
2093		handle = ext3_journal_start(inode, 1);
2094		if (IS_ERR(handle)) {
2095			clear_highpage(page);
2096			flush_dcache_page(page);
2097			err = PTR_ERR(handle);
2098			goto unlock;
2099		}
2100	}
2101
2102	if (ext3_should_journal_data(inode)) {
2103		BUFFER_TRACE(bh, "get write access");
2104		err = ext3_journal_get_write_access(handle, bh);
2105		if (err)
2106			goto stop;
2107	}
2108
2109	zero_user(page, offset, length);
2110	BUFFER_TRACE(bh, "zeroed end of block");
2111
2112	err = 0;
2113	if (ext3_should_journal_data(inode)) {
2114		err = ext3_journal_dirty_metadata(handle, bh);
2115	} else {
2116		if (ext3_should_order_data(inode))
2117			err = ext3_journal_dirty_data(handle, bh);
2118		mark_buffer_dirty(bh);
2119	}
2120stop:
2121	if (handle)
2122		ext3_journal_stop(handle);
2123
2124unlock:
2125	unlock_page(page);
2126	page_cache_release(page);
2127	return err;
2128}
2129
2130/*
2131 * Probably it should be a library function... search for first non-zero word
2132 * or memcmp with zero_page, whatever is better for particular architecture.
2133 * Linus?
2134 */
2135static inline int all_zeroes(__le32 *p, __le32 *q)
2136{
2137	while (p < q)
2138		if (*p++)
2139			return 0;
2140	return 1;
2141}
2142
2143/**
2144 *	ext3_find_shared - find the indirect blocks for partial truncation.
2145 *	@inode:	  inode in question
2146 *	@depth:	  depth of the affected branch
2147 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
2148 *	@chain:	  place to store the pointers to partial indirect blocks
2149 *	@top:	  place to store the (detached) top of the branch
2150 *
2151 *	This is a helper function used by ext3_truncate().
2152 *
2153 *	When we do truncate() we may have to clean the ends of several
2154 *	indirect blocks but leave the blocks themselves alive. A block is
2155 *	partially truncated if some data below the new i_size is referenced
2156 *	from it (and it is on the path to the first completely truncated
2157 *	data block, indeed).  We have to free the top of that path along
2158 *	with everything to the right of the path. Since no allocation
2159 *	past the truncation point is possible until ext3_truncate()
2160 *	finishes, we may safely do the latter, but top of branch may
2161 *	require special attention - pageout below the truncation point
2162 *	might try to populate it.
2163 *
2164 *	We atomically detach the top of branch from the tree, store the
2165 *	block number of its root in *@top, pointers to buffer_heads of
2166 *	partially truncated blocks - in @chain[].bh and pointers to
2167 *	their last elements that should not be removed - in
2168 *	@chain[].p. Return value is the pointer to last filled element
2169 *	of @chain.
2170 *
2171 *	The work left to the caller is the actual freeing of subtrees:
2172 *		a) free the subtree starting from *@top
2173 *		b) free the subtrees whose roots are stored in
2174 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2175 *		c) free the subtrees growing from the inode past the @chain[0].
2176 *			(no partially truncated stuff there).  */
2177
2178static Indirect *ext3_find_shared(struct inode *inode, int depth,
2179			int offsets[4], Indirect chain[4], __le32 *top)
2180{
2181	Indirect *partial, *p;
2182	int k, err;
2183
2184	*top = 0;
2185	/* Make k index the deepest non-null offset + 1 */
2186	for (k = depth; k > 1 && !offsets[k-1]; k--)
2187		;
2188	partial = ext3_get_branch(inode, k, offsets, chain, &err);
2189	/* Writer: pointers */
2190	if (!partial)
2191		partial = chain + k-1;
2192	/*
2193	 * If the branch acquired continuation since we've looked at it -
2194	 * fine, it should all survive and (new) top doesn't belong to us.
2195	 */
2196	if (!partial->key && *partial->p)
2197		/* Writer: end */
2198		goto no_top;
2199	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2200		;
2201	/*
2202	 * OK, we've found the last block that must survive. The rest of our
2203	 * branch should be detached before unlocking. However, if that rest
2204	 * of branch is all ours and does not grow immediately from the inode
2205	 * it's easier to cheat and just decrement partial->p.
2206	 */
2207	if (p == chain + k - 1 && p > chain) {
2208		p->p--;
2209	} else {
2210		*top = *p->p;
2211		/* Nope, don't do this in ext3.  Must leave the tree intact */
2212#if 0
2213		*p->p = 0;
2214#endif
2215	}
2216	/* Writer: end */
2217
2218	while(partial > p) {
2219		brelse(partial->bh);
2220		partial--;
2221	}
2222no_top:
2223	return partial;
2224}
2225
2226/*
2227 * Zero a number of block pointers in either an inode or an indirect block.
2228 * If we restart the transaction we must again get write access to the
2229 * indirect block for further modification.
2230 *
2231 * We release `count' blocks on disk, but (last - first) may be greater
2232 * than `count' because there can be holes in there.
2233 */
2234static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2235		struct buffer_head *bh, ext3_fsblk_t block_to_free,
2236		unsigned long count, __le32 *first, __le32 *last)
2237{
2238	__le32 *p;
2239	if (try_to_extend_transaction(handle, inode)) {
2240		if (bh) {
2241			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2242			if (ext3_journal_dirty_metadata(handle, bh))
2243				return;
2244		}
2245		ext3_mark_inode_dirty(handle, inode);
2246		truncate_restart_transaction(handle, inode);
2247		if (bh) {
2248			BUFFER_TRACE(bh, "retaking write access");
2249			if (ext3_journal_get_write_access(handle, bh))
2250				return;
2251		}
2252	}
2253
2254	/*
2255	 * Any buffers which are on the journal will be in memory. We find
2256	 * them on the hash table so journal_revoke() will run journal_forget()
2257	 * on them.  We've already detached each block from the file, so
2258	 * bforget() in journal_forget() should be safe.
2259	 *
2260	 * AKPM: turn on bforget in journal_forget()!!!
2261	 */
2262	for (p = first; p < last; p++) {
2263		u32 nr = le32_to_cpu(*p);
2264		if (nr) {
2265			struct buffer_head *bh;
2266
2267			*p = 0;
2268			bh = sb_find_get_block(inode->i_sb, nr);
2269			ext3_forget(handle, 0, inode, bh, nr);
2270		}
2271	}
2272
2273	ext3_free_blocks(handle, inode, block_to_free, count);
2274}
2275
2276/**
2277 * ext3_free_data - free a list of data blocks
2278 * @handle:	handle for this transaction
2279 * @inode:	inode we are dealing with
2280 * @this_bh:	indirect buffer_head which contains *@first and *@last
2281 * @first:	array of block numbers
2282 * @last:	points immediately past the end of array
2283 *
2284 * We are freeing all blocks referred from that array (numbers are stored as
2285 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2286 *
2287 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2288 * blocks are contiguous then releasing them at one time will only affect one
2289 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2290 * actually use a lot of journal space.
2291 *
2292 * @this_bh will be %NULL if @first and @last point into the inode's direct
2293 * block pointers.
2294 */
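/*
 * For example (hypothetical block numbers), if the array holds
 * { 100, 101, 102, 0, 200 } then the loop below accumulates the run
 * 100..102 and calls ext3_clear_blocks() once for (block 100, count 3),
 * skips the hole, and finally calls it again for (block 200, count 1).
 */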
2295static void ext3_free_data(handle_t *handle, struct inode *inode,
2296			   struct buffer_head *this_bh,
2297			   __le32 *first, __le32 *last)
2298{
2299	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2300	unsigned long count = 0;	    /* Number of blocks in the run */
2301	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
2302					       corresponding to
2303					       block_to_free */
2304	ext3_fsblk_t nr;		    /* Current block # */
2305	__le32 *p;			    /* Pointer into inode/ind
2306					       for current block */
2307	int err;
2308
2309	if (this_bh) {				/* For indirect block */
2310		BUFFER_TRACE(this_bh, "get_write_access");
2311		err = ext3_journal_get_write_access(handle, this_bh);
2312		/* Important: if we can't update the indirect pointers
2313		 * to the blocks, we can't free them. */
2314		if (err)
2315			return;
2316	}
2317
2318	for (p = first; p < last; p++) {
2319		nr = le32_to_cpu(*p);
2320		if (nr) {
2321			/* accumulate blocks to free if they're contiguous */
2322			if (count == 0) {
2323				block_to_free = nr;
2324				block_to_free_p = p;
2325				count = 1;
2326			} else if (nr == block_to_free + count) {
2327				count++;
2328			} else {
2329				ext3_clear_blocks(handle, inode, this_bh,
2330						  block_to_free,
2331						  count, block_to_free_p, p);
2332				block_to_free = nr;
2333				block_to_free_p = p;
2334				count = 1;
2335			}
2336		}
2337	}
2338
2339	if (count > 0)
2340		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2341				  count, block_to_free_p, p);
2342
2343	if (this_bh) {
2344		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2345
2346		/*
2347		 * The buffer head should have an attached journal head at this
2348		 * point. However, if the data is corrupted and an indirect
2349		 * block pointed to itself, it would have been detached when
2350		 * the block was cleared. Check for this instead of OOPSing.
2351		 */
2352		if (bh2jh(this_bh))
2353			ext3_journal_dirty_metadata(handle, this_bh);
2354		else
2355			ext3_error(inode->i_sb, "ext3_free_data",
2356				   "circular indirect block detected, "
2357				   "inode=%lu, block=%llu",
2358				   inode->i_ino,
2359				   (unsigned long long)this_bh->b_blocknr);
2360	}
2361}
2362
2363/**
2364 *	ext3_free_branches - free an array of branches
2365 *	@handle: JBD handle for this transaction
2366 *	@inode:	inode we are dealing with
2367 *	@parent_bh: the buffer_head which contains *@first and *@last
2368 *	@first:	array of block numbers
2369 *	@last:	pointer immediately past the end of array
2370 *	@depth:	depth of the branches to free
2371 *
2372 *	We are freeing all blocks referred from these branches (numbers are
2373 *	stored as little-endian 32-bit) and updating @inode->i_blocks
2374 *	appropriately.
2375 */
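/*
 * For instance, freeing a whole triple-indirect subtree enters here with
 * @depth == 3: each recursion level reads one indirect block, descends
 * with @depth reduced by one, then revokes that block via
 * ext3_forget(handle, 1, ...) and releases it, until @depth reaches zero
 * and the remaining pointers are plain data blocks handed to
 * ext3_free_data().
 */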
2376static void ext3_free_branches(handle_t *handle, struct inode *inode,
2377			       struct buffer_head *parent_bh,
2378			       __le32 *first, __le32 *last, int depth)
2379{
2380	ext3_fsblk_t nr;
2381	__le32 *p;
2382
2383	if (is_handle_aborted(handle))
2384		return;
2385
2386	if (depth--) {
2387		struct buffer_head *bh;
2388		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2389		p = last;
2390		while (--p >= first) {
2391			nr = le32_to_cpu(*p);
2392			if (!nr)
2393				continue;		/* A hole */
2394
2395			/* Go read the buffer for the next level down */
2396			bh = sb_bread(inode->i_sb, nr);
2397
2398			/*
2399			 * A read failure? Report error and clear slot
2400			 * (should be rare).
2401			 */
2402			if (!bh) {
2403				ext3_error(inode->i_sb, "ext3_free_branches",
2404					   "Read failure, inode=%lu, block="E3FSBLK,
2405					   inode->i_ino, nr);
2406				continue;
2407			}
2408
2409			/* This zaps the entire block.  Bottom up. */
2410			BUFFER_TRACE(bh, "free child branches");
2411			ext3_free_branches(handle, inode, bh,
2412					   (__le32*)bh->b_data,
2413					   (__le32*)bh->b_data + addr_per_block,
2414					   depth);
2415
2416			/*
2417			 * Everything below this pointer has been
2418			 * released.  Now let this top-of-subtree go.
2419			 *
2420			 * We want the freeing of this indirect block to be
2421			 * atomic in the journal with the updating of the
2422			 * bitmap block which owns it.  So make some room in
2423			 * the journal.
2424			 *
2425			 * We zero the parent pointer *after* freeing its
2426			 * pointee in the bitmaps, so if extend_transaction()
2427			 * for some reason fails to put the bitmap changes and
2428			 * the release into the same transaction, recovery
2429			 * will merely complain about releasing a free block,
2430			 * rather than leaking blocks.
2431			 */
2432			if (is_handle_aborted(handle))
2433				return;
2434			if (try_to_extend_transaction(handle, inode)) {
2435				ext3_mark_inode_dirty(handle, inode);
2436				truncate_restart_transaction(handle, inode);
2437			}
2438
2439			/*
2440			 * We've probably journalled the indirect block several
2441			 * times during the truncate.  But it's no longer
2442			 * needed and we now drop it from the transaction via
2443			 * journal_revoke().
2444			 *
2445			 * That's easy if it's exclusively part of this
2446			 * transaction.  But if it's part of the committing
2447			 * transaction then journal_forget() will simply
2448			 * brelse() it.  That means that if the underlying
2449			 * block is reallocated in ext3_get_block(),
2450			 * unmap_underlying_metadata() will find this block
2451			 * and will try to get rid of it.  damn, damn. Thus
2452			 * we don't allow a block to be reallocated until
2453			 * a transaction freeing it has fully committed.
2454			 *
2455			 * We also have to make sure journal replay after a
2456			 * crash does not overwrite non-journaled data blocks
2457			 * with old metadata when the block got reallocated for
2458			 * data.  Thus we have to store a revoke record for a
2459			 * block in the same transaction in which we free the
2460			 * block.
2461			 */
2462			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2463
2464			ext3_free_blocks(handle, inode, nr, 1);
2465
2466			if (parent_bh) {
2467				/*
2468				 * The block which we have just freed is
2469				 * pointed to by an indirect block: journal it
2470				 */
2471				BUFFER_TRACE(parent_bh, "get_write_access");
2472				if (!ext3_journal_get_write_access(handle,
2473								   parent_bh)){
2474					*p = 0;
2475					BUFFER_TRACE(parent_bh,
2476					"call ext3_journal_dirty_metadata");
2477					ext3_journal_dirty_metadata(handle,
2478								    parent_bh);
2479				}
2480			}
2481		}
2482	} else {
2483		/* We have reached the bottom of the tree. */
2484		BUFFER_TRACE(parent_bh, "free data blocks");
2485		ext3_free_data(handle, inode, parent_bh, first, last);
2486	}
2487}
2488
2489int ext3_can_truncate(struct inode *inode)
2490{
2491	if (S_ISREG(inode->i_mode))
2492		return 1;
2493	if (S_ISDIR(inode->i_mode))
2494		return 1;
2495	if (S_ISLNK(inode->i_mode))
2496		return !ext3_inode_is_fast_symlink(inode);
2497	return 0;
2498}
2499
2500/*
2501 * ext3_truncate()
2502 *
2503 * We block out ext3_get_block() block instantiations across the entire
2504 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2505 * simultaneously on behalf of the same inode.
2506 *
2507 * As we work through the truncate and commit bits of it to the journal there
2508 * is one core, guiding principle: the file's tree must always be consistent on
2509 * disk.  We must be able to restart the truncate after a crash.
2510 *
2511 * The file's tree may be transiently inconsistent in memory (although it
2512 * probably isn't), but whenever we close off and commit a journal transaction,
2513 * the contents of (the filesystem + the journal) must be consistent and
2514 * restartable.  It's pretty simple, really: bottom up, right to left (although
2515 * left-to-right works OK too).
2516 *
2517 * Note that at recovery time, journal replay occurs *before* the restart of
2518 * truncate against the orphan inode list.
2519 *
2520 * The committed inode has the new, desired i_size (which is the same as
2521 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2522 * that this inode's truncate did not complete and it will again call
2523 * ext3_truncate() to have another go.  So there will be instantiated blocks
2524 * to the right of the truncation point in a crashed ext3 filesystem.  But
2525 * that's fine - as long as they are linked from the inode, the post-crash
2526 * ext3_truncate() run will find them and release them.
2527 */
2528void ext3_truncate(struct inode *inode)
2529{
2530	handle_t *handle;
2531	struct ext3_inode_info *ei = EXT3_I(inode);
2532	__le32 *i_data = ei->i_data;
2533	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2534	int offsets[4];
2535	Indirect chain[4];
2536	Indirect *partial;
2537	__le32 nr = 0;
2538	int n;
2539	long last_block;
2540	unsigned blocksize = inode->i_sb->s_blocksize;
2541
2542	trace_ext3_truncate_enter(inode);
2543
2544	if (!ext3_can_truncate(inode))
2545		goto out_notrans;
2546
2547	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
2548		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
2549
2550	handle = start_transaction(inode);
2551	if (IS_ERR(handle))
2552		goto out_notrans;
2553
2554	last_block = (inode->i_size + blocksize-1)
2555					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2556	n = ext3_block_to_path(inode, last_block, offsets, NULL);
2557	if (n == 0)
2558		goto out_stop;	/* error */
2559
2560	/*
2561	 * OK.  This truncate is going to happen.  We add the inode to the
2562	 * orphan list, so that if this truncate spans multiple transactions,
2563	 * and we crash, we will resume the truncate when the filesystem
2564	 * recovers.  It also marks the inode dirty, to catch the new size.
2565	 *
2566	 * Implication: the file must always be in a sane, consistent
2567	 * truncatable state while each transaction commits.
2568	 */
2569	if (ext3_orphan_add(handle, inode))
2570		goto out_stop;
2571
2572	/*
2573	 * The orphan list entry will now protect us from any crash which
2574	 * occurs before the truncate completes, so it is now safe to propagate
2575	 * the new, shorter inode size (held for now in i_size) into the
2576	 * on-disk inode. We do this via i_disksize, which is the value which
2577	 * ext3 *really* writes onto the disk inode.
2578	 */
2579	ei->i_disksize = inode->i_size;
2580
2581	/*
2582	 * From here we block out all ext3_get_block() callers who want to
2583	 * modify the block allocation tree.
2584	 */
2585	mutex_lock(&ei->truncate_mutex);
2586
2587	if (n == 1) {		/* direct blocks */
2588		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2589			       i_data + EXT3_NDIR_BLOCKS);
2590		goto do_indirects;
2591	}
2592
2593	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2594	/* Kill the top of shared branch (not detached) */
2595	if (nr) {
2596		if (partial == chain) {
2597			/* Shared branch grows from the inode */
2598			ext3_free_branches(handle, inode, NULL,
2599					   &nr, &nr+1, (chain+n-1) - partial);
2600			*partial->p = 0;
2601			/*
2602			 * We mark the inode dirty prior to restart,
2603			 * and prior to stop.  No need for it here.
2604			 */
2605		} else {
2606			/* Shared branch grows from an indirect block */
2607			ext3_free_branches(handle, inode, partial->bh,
2608					partial->p,
2609					partial->p+1, (chain+n-1) - partial);
2610		}
2611	}
2612	/* Clear the ends of indirect blocks on the shared branch */
2613	while (partial > chain) {
2614		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2615				   (__le32*)partial->bh->b_data+addr_per_block,
2616				   (chain+n-1) - partial);
2617		BUFFER_TRACE(partial->bh, "call brelse");
2618		brelse (partial->bh);
2619		partial--;
2620	}
2621do_indirects:
2622	/* Kill the remaining (whole) subtrees */
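	/*
	 * Note that the switch below falls through deliberately.  For example,
	 * if the truncation point lies in the double-indirect range, offsets[0]
	 * is EXT3_DIND_BLOCK: the direct and single-indirect blocks all stay,
	 * the double-indirect tree was partially freed above, and only the
	 * triple-indirect tree (freed under the EXT3_DIND_BLOCK label) still
	 * has to be released as a whole.
	 */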
2623	switch (offsets[0]) {
2624	default:
2625		nr = i_data[EXT3_IND_BLOCK];
2626		if (nr) {
2627			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2628			i_data[EXT3_IND_BLOCK] = 0;
2629		}
2630	case EXT3_IND_BLOCK:
2631		nr = i_data[EXT3_DIND_BLOCK];
2632		if (nr) {
2633			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2634			i_data[EXT3_DIND_BLOCK] = 0;
2635		}
2636	case EXT3_DIND_BLOCK:
2637		nr = i_data[EXT3_TIND_BLOCK];
2638		if (nr) {
2639			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2640			i_data[EXT3_TIND_BLOCK] = 0;
2641		}
2642	case EXT3_TIND_BLOCK:
2643		;
2644	}
2645
2646	ext3_discard_reservation(inode);
2647
2648	mutex_unlock(&ei->truncate_mutex);
2649	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2650	ext3_mark_inode_dirty(handle, inode);
2651
2652	/*
2653	 * In a multi-transaction truncate, we only make the final transaction
2654	 * synchronous
2655	 */
2656	if (IS_SYNC(inode))
2657		handle->h_sync = 1;
2658out_stop:
2659	/*
2660	 * If this was a simple ftruncate(), and the file will remain alive
2661	 * then we need to clear up the orphan record which we created above.
2662	 * However, if this was a real unlink then we were called by
2663	 * ext3_evict_inode(), and we allow that function to clean up the
2664	 * orphan info for us.
2665	 */
2666	if (inode->i_nlink)
2667		ext3_orphan_del(handle, inode);
2668
2669	ext3_journal_stop(handle);
2670	trace_ext3_truncate_exit(inode);
2671	return;
2672out_notrans:
2673	/*
2674	 * Delete the inode from orphan list so that it doesn't stay there
2675	 * forever and trigger assertion on umount.
2676	 */
2677	if (inode->i_nlink)
2678		ext3_orphan_del(NULL, inode);
2679	trace_ext3_truncate_exit(inode);
2680}
2681
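/*
 * Illustrative arithmetic only (the real values depend on the filesystem
 * geometry): with 8192 inodes per group, 128-byte inodes and 1K blocks,
 * ino 8200 falls in block_group 1, at ((8200 - 1) % 8192) * 128 == 896
 * bytes into that group's inode table, i.e. in the first table block
 * (896 >> 10 == 0) at an in-block offset of 896.
 */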
2682static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2683		unsigned long ino, struct ext3_iloc *iloc)
2684{
2685	unsigned long block_group;
2686	unsigned long offset;
2687	ext3_fsblk_t block;
2688	struct ext3_group_desc *gdp;
2689
2690	if (!ext3_valid_inum(sb, ino)) {
2691		/*
2692		 * This error is already checked for in namei.c unless we are
2693		 * looking at an NFS filehandle, in which case no error
2694		 * report is needed
2695		 */
2696		return 0;
2697	}
2698
2699	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2700	gdp = ext3_get_group_desc(sb, block_group, NULL);
2701	if (!gdp)
2702		return 0;
2703	/*
2704	 * Figure out the offset within the block group inode table
2705	 */
2706	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2707		EXT3_INODE_SIZE(sb);
2708	block = le32_to_cpu(gdp->bg_inode_table) +
2709		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
2710
2711	iloc->block_group = block_group;
2712	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2713	return block;
2714}
2715
2716/*
2717 * ext3_get_inode_loc returns with an extra refcount against the inode's
2718 * underlying buffer_head on success. If 'in_mem' is true, we have all
2719 * data in memory that is needed to recreate the on-disk version of this
2720 * inode.
2721 */
2722static int __ext3_get_inode_loc(struct inode *inode,
2723				struct ext3_iloc *iloc, int in_mem)
2724{
2725	ext3_fsblk_t block;
2726	struct buffer_head *bh;
2727
2728	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2729	if (!block)
2730		return -EIO;
2731
2732	bh = sb_getblk(inode->i_sb, block);
2733	if (!bh) {
2734		ext3_error (inode->i_sb, "ext3_get_inode_loc",
2735				"unable to read inode block - "
2736				"inode=%lu, block="E3FSBLK,
2737				 inode->i_ino, block);
2738		return -EIO;
2739	}
2740	if (!buffer_uptodate(bh)) {
2741		lock_buffer(bh);
2742
2743		/*
2744		 * If the buffer has the write error flag, we have failed
2745		 * to write out another inode in the same block.  In this
2746		 * case, we don't have to read the block because we may
2747		 * read the old inode data successfully.
2748		 */
2749		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2750			set_buffer_uptodate(bh);
2751
2752		if (buffer_uptodate(bh)) {
2753			/* someone brought it uptodate while we waited */
2754			unlock_buffer(bh);
2755			goto has_buffer;
2756		}
2757
2758		/*
2759		 * If we have all information of the inode in memory and this
2760		 * is the only valid inode in the block, we need not read the
2761		 * block.
2762		 */
2763		if (in_mem) {
2764			struct buffer_head *bitmap_bh;
2765			struct ext3_group_desc *desc;
2766			int inodes_per_buffer;
2767			int inode_offset, i;
2768			int block_group;
2769			int start;
2770
2771			block_group = (inode->i_ino - 1) /
2772					EXT3_INODES_PER_GROUP(inode->i_sb);
2773			inodes_per_buffer = bh->b_size /
2774				EXT3_INODE_SIZE(inode->i_sb);
2775			inode_offset = ((inode->i_ino - 1) %
2776					EXT3_INODES_PER_GROUP(inode->i_sb));
2777			start = inode_offset & ~(inodes_per_buffer - 1);
2778
2779			/* Is the inode bitmap in cache? */
2780			desc = ext3_get_group_desc(inode->i_sb,
2781						block_group, NULL);
2782			if (!desc)
2783				goto make_io;
2784
2785			bitmap_bh = sb_getblk(inode->i_sb,
2786					le32_to_cpu(desc->bg_inode_bitmap));
2787			if (!bitmap_bh)
2788				goto make_io;
2789
2790			/*
2791			 * If the inode bitmap isn't in cache then the
2792			 * optimisation may end up performing two reads instead
2793			 * of one, so skip it.
2794			 */
2795			if (!buffer_uptodate(bitmap_bh)) {
2796				brelse(bitmap_bh);
2797				goto make_io;
2798			}
2799			for (i = start; i < start + inodes_per_buffer; i++) {
2800				if (i == inode_offset)
2801					continue;
2802				if (ext3_test_bit(i, bitmap_bh->b_data))
2803					break;
2804			}
2805			brelse(bitmap_bh);
2806			if (i == start + inodes_per_buffer) {
2807				/* all other inodes are free, so skip I/O */
2808				memset(bh->b_data, 0, bh->b_size);
2809				set_buffer_uptodate(bh);
2810				unlock_buffer(bh);
2811				goto has_buffer;
2812			}
2813		}
2814
2815make_io:
2816		/*
2817		 * There are other valid inodes in the buffer, this inode
2818		 * has in-inode xattrs, or we don't have this inode in memory.
2819		 * Read the block from disk.
2820		 */
2821		trace_ext3_load_inode(inode);
2822		get_bh(bh);
2823		bh->b_end_io = end_buffer_read_sync;
2824		submit_bh(READ | REQ_META | REQ_PRIO, bh);
2825		wait_on_buffer(bh);
2826		if (!buffer_uptodate(bh)) {
2827			ext3_error(inode->i_sb, "ext3_get_inode_loc",
2828					"unable to read inode block - "
2829					"inode=%lu, block="E3FSBLK,
2830					inode->i_ino, block);
2831			brelse(bh);
2832			return -EIO;
2833		}
2834	}
2835has_buffer:
2836	iloc->bh = bh;
2837	return 0;
2838}
2839
2840int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2841{
2842	/* We have all inode data except xattrs in memory here. */
2843	return __ext3_get_inode_loc(inode, iloc,
2844		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
2845}
2846
2847void ext3_set_inode_flags(struct inode *inode)
2848{
2849	unsigned int flags = EXT3_I(inode)->i_flags;
2850
2851	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2852	if (flags & EXT3_SYNC_FL)
2853		inode->i_flags |= S_SYNC;
2854	if (flags & EXT3_APPEND_FL)
2855		inode->i_flags |= S_APPEND;
2856	if (flags & EXT3_IMMUTABLE_FL)
2857		inode->i_flags |= S_IMMUTABLE;
2858	if (flags & EXT3_NOATIME_FL)
2859		inode->i_flags |= S_NOATIME;
2860	if (flags & EXT3_DIRSYNC_FL)
2861		inode->i_flags |= S_DIRSYNC;
2862}
2863
2864/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2865void ext3_get_inode_flags(struct ext3_inode_info *ei)
2866{
2867	unsigned int flags = ei->vfs_inode.i_flags;
2868
2869	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2870			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2871	if (flags & S_SYNC)
2872		ei->i_flags |= EXT3_SYNC_FL;
2873	if (flags & S_APPEND)
2874		ei->i_flags |= EXT3_APPEND_FL;
2875	if (flags & S_IMMUTABLE)
2876		ei->i_flags |= EXT3_IMMUTABLE_FL;
2877	if (flags & S_NOATIME)
2878		ei->i_flags |= EXT3_NOATIME_FL;
2879	if (flags & S_DIRSYNC)
2880		ei->i_flags |= EXT3_DIRSYNC_FL;
2881}
2882
2883struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2884{
2885	struct ext3_iloc iloc;
2886	struct ext3_inode *raw_inode;
2887	struct ext3_inode_info *ei;
2888	struct buffer_head *bh;
2889	struct inode *inode;
2890	journal_t *journal = EXT3_SB(sb)->s_journal;
2891	transaction_t *transaction;
2892	long ret;
2893	int block;
2894	uid_t i_uid;
2895	gid_t i_gid;
2896
2897	inode = iget_locked(sb, ino);
2898	if (!inode)
2899		return ERR_PTR(-ENOMEM);
2900	if (!(inode->i_state & I_NEW))
2901		return inode;
2902
2903	ei = EXT3_I(inode);
2904	ei->i_block_alloc_info = NULL;
2905
2906	ret = __ext3_get_inode_loc(inode, &iloc, 0);
2907	if (ret < 0)
2908		goto bad_inode;
2909	bh = iloc.bh;
2910	raw_inode = ext3_raw_inode(&iloc);
2911	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2912	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2913	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2914	if(!(test_opt (inode->i_sb, NO_UID32))) {
2915		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2916		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2917	}
2918	i_uid_write(inode, i_uid);
2919	i_gid_write(inode, i_gid);
2920	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
2921	inode->i_size = le32_to_cpu(raw_inode->i_size);
2922	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2923	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2924	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2925	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2926
2927	ei->i_state_flags = 0;
2928	ei->i_dir_start_lookup = 0;
2929	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2930	/* We now have enough fields to check if the inode was active or not.
2931	 * This is needed because nfsd might try to access dead inodes;
2932	 * the test is the same one that e2fsck uses.
2933	 * NeilBrown 1999oct15
2934	 */
2935	if (inode->i_nlink == 0) {
2936		if (inode->i_mode == 0 ||
2937		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2938			/* this inode is deleted */
2939			brelse (bh);
2940			ret = -ESTALE;
2941			goto bad_inode;
2942		}
2943		/* The only unlinked inodes we let through here have
2944		 * valid i_mode and are being read by the orphan
2945		 * recovery code: that's fine, we're about to complete
2946		 * the process of deleting those. */
2947	}
2948	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2949	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2950#ifdef EXT3_FRAGMENTS
2951	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2952	ei->i_frag_no = raw_inode->i_frag;
2953	ei->i_frag_size = raw_inode->i_fsize;
2954#endif
2955	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2956	if (!S_ISREG(inode->i_mode)) {
2957		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2958	} else {
2959		inode->i_size |=
2960			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2961	}
2962	ei->i_disksize = inode->i_size;
2963	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2964	ei->i_block_group = iloc.block_group;
2965	/*
2966	 * NOTE! The in-memory inode i_data array is in little-endian order
2967	 * even on big-endian machines: we do NOT byteswap the block numbers!
2968	 */
2969	for (block = 0; block < EXT3_N_BLOCKS; block++)
2970		ei->i_data[block] = raw_inode->i_block[block];
2971	INIT_LIST_HEAD(&ei->i_orphan);
2972
2973	/*
2974	 * Set transaction id's of transactions that have to be committed
2975	 * to finish f[data]sync. We set them to currently running transaction
2976	 * as we cannot be sure that the inode or some of its metadata isn't
2977	 * part of the transaction - the inode could have been reclaimed and
2978	 * now it is reread from disk.
2979	 */
2980	if (journal) {
2981		tid_t tid;
2982
2983		spin_lock(&journal->j_state_lock);
2984		if (journal->j_running_transaction)
2985			transaction = journal->j_running_transaction;
2986		else
2987			transaction = journal->j_committing_transaction;
2988		if (transaction)
2989			tid = transaction->t_tid;
2990		else
2991			tid = journal->j_commit_sequence;
2992		spin_unlock(&journal->j_state_lock);
2993		atomic_set(&ei->i_sync_tid, tid);
2994		atomic_set(&ei->i_datasync_tid, tid);
2995	}
2996
2997	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2998	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2999		/*
3000		 * When mke2fs creates big inodes it does not zero out
3001		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
3002		 * so ignore those first few inodes.
3003		 */
3004		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3005		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3006		    EXT3_INODE_SIZE(inode->i_sb)) {
3007			brelse (bh);
3008			ret = -EIO;
3009			goto bad_inode;
3010		}
3011		if (ei->i_extra_isize == 0) {
3012			/* The extra space is currently unused. Use it. */
3013			ei->i_extra_isize = sizeof(struct ext3_inode) -
3014					    EXT3_GOOD_OLD_INODE_SIZE;
3015		} else {
3016			__le32 *magic = (void *)raw_inode +
3017					EXT3_GOOD_OLD_INODE_SIZE +
3018					ei->i_extra_isize;
3019			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
3020				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
3021		}
3022	} else
3023		ei->i_extra_isize = 0;
3024
3025	if (S_ISREG(inode->i_mode)) {
3026		inode->i_op = &ext3_file_inode_operations;
3027		inode->i_fop = &ext3_file_operations;
3028		ext3_set_aops(inode);
3029	} else if (S_ISDIR(inode->i_mode)) {
3030		inode->i_op = &ext3_dir_inode_operations;
3031		inode->i_fop = &ext3_dir_operations;
3032	} else if (S_ISLNK(inode->i_mode)) {
3033		if (ext3_inode_is_fast_symlink(inode)) {
3034			inode->i_op = &ext3_fast_symlink_inode_operations;
3035			nd_terminate_link(ei->i_data, inode->i_size,
3036				sizeof(ei->i_data) - 1);
3037		} else {
3038			inode->i_op = &ext3_symlink_inode_operations;
3039			ext3_set_aops(inode);
3040		}
3041	} else {
3042		inode->i_op = &ext3_special_inode_operations;
3043		if (raw_inode->i_block[0])
3044			init_special_inode(inode, inode->i_mode,
3045			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3046		else
3047			init_special_inode(inode, inode->i_mode,
3048			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3049	}
3050	brelse (iloc.bh);
3051	ext3_set_inode_flags(inode);
3052	unlock_new_inode(inode);
3053	return inode;
3054
3055bad_inode:
3056	iget_failed(inode);
3057	return ERR_PTR(ret);
3058}
3059
3060/*
3061 * Post the struct inode info into an on-disk inode location in the
3062 * buffer-cache.  This gobbles the caller's reference to the
3063 * buffer_head in the inode location struct.
3064 *
3065 * The caller must have write access to iloc->bh.
3066 */
3067static int ext3_do_update_inode(handle_t *handle,
3068				struct inode *inode,
3069				struct ext3_iloc *iloc)
3070{
3071	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
3072	struct ext3_inode_info *ei = EXT3_I(inode);
3073	struct buffer_head *bh = iloc->bh;
3074	int err = 0, rc, block;
3075	int need_datasync = 0;
3076	__le32 disksize;
3077	uid_t i_uid;
3078	gid_t i_gid;
3079
3080again:
3081	/* we can't allow multiple procs in here at once, it's a bit racy */
3082	lock_buffer(bh);
3083
3084	/* For fields not tracked in the in-memory inode,
3085	 * initialise them to zero for new inodes. */
3086	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
3087		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
3088
3089	ext3_get_inode_flags(ei);
3090	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3091	i_uid = i_uid_read(inode);
3092	i_gid = i_gid_read(inode);
3093	if(!(test_opt(inode->i_sb, NO_UID32))) {
3094		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
3095		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
3096/*
3097 * Fix up interoperability with old kernels. Otherwise, old inodes get
3098 * re-used with the upper 16 bits of the uid/gid intact
3099 */
3100		if(!ei->i_dtime) {
3101			raw_inode->i_uid_high =
3102				cpu_to_le16(high_16_bits(i_uid));
3103			raw_inode->i_gid_high =
3104				cpu_to_le16(high_16_bits(i_gid));
3105		} else {
3106			raw_inode->i_uid_high = 0;
3107			raw_inode->i_gid_high = 0;
3108		}
3109	} else {
3110		raw_inode->i_uid_low =
3111			cpu_to_le16(fs_high2lowuid(i_uid));
3112		raw_inode->i_gid_low =
3113			cpu_to_le16(fs_high2lowgid(i_gid));
3114		raw_inode->i_uid_high = 0;
3115		raw_inode->i_gid_high = 0;
3116	}
3117	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
3118	disksize = cpu_to_le32(ei->i_disksize);
3119	if (disksize != raw_inode->i_size) {
3120		need_datasync = 1;
3121		raw_inode->i_size = disksize;
3122	}
3123	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
3124	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
3125	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
3126	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
3127	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
3128	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
3129#ifdef EXT3_FRAGMENTS
3130	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
3131	raw_inode->i_frag = ei->i_frag_no;
3132	raw_inode->i_fsize = ei->i_frag_size;
3133#endif
3134	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
3135	if (!S_ISREG(inode->i_mode)) {
3136		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
3137	} else {
3138		disksize = cpu_to_le32(ei->i_disksize >> 32);
3139		if (disksize != raw_inode->i_size_high) {
3140			raw_inode->i_size_high = disksize;
3141			need_datasync = 1;
3142		}
3143		if (ei->i_disksize > 0x7fffffffULL) {
3144			struct super_block *sb = inode->i_sb;
3145			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
3146					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
3147			    EXT3_SB(sb)->s_es->s_rev_level ==
3148					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
3149			       /* If this is the first large file
3150				* created, add a flag to the superblock.
3151				*/
3152				unlock_buffer(bh);
3153				err = ext3_journal_get_write_access(handle,
3154						EXT3_SB(sb)->s_sbh);
3155				if (err)
3156					goto out_brelse;
3157
3158				ext3_update_dynamic_rev(sb);
3159				EXT3_SET_RO_COMPAT_FEATURE(sb,
3160					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
3161				handle->h_sync = 1;
3162				err = ext3_journal_dirty_metadata(handle,
3163						EXT3_SB(sb)->s_sbh);
3164				/* get our lock and start over */
3165				goto again;
3166			}
3167		}
3168	}
3169	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3170	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3171		if (old_valid_dev(inode->i_rdev)) {
3172			raw_inode->i_block[0] =
3173				cpu_to_le32(old_encode_dev(inode->i_rdev));
3174			raw_inode->i_block[1] = 0;
3175		} else {
3176			raw_inode->i_block[0] = 0;
3177			raw_inode->i_block[1] =
3178				cpu_to_le32(new_encode_dev(inode->i_rdev));
3179			raw_inode->i_block[2] = 0;
3180		}
3181	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
3182		raw_inode->i_block[block] = ei->i_data[block];
3183
3184	if (ei->i_extra_isize)
3185		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3186
3187	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
3188	unlock_buffer(bh);
3189	rc = ext3_journal_dirty_metadata(handle, bh);
3190	if (!err)
3191		err = rc;
3192	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
3193
3194	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
3195	if (need_datasync)
3196		atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
3197out_brelse:
3198	brelse (bh);
3199	ext3_std_error(inode->i_sb, err);
3200	return err;
3201}
3202
3203/*
3204 * ext3_write_inode()
3205 *
3206 * We are called from a few places:
3207 *
3208 * - Within generic_file_write() for O_SYNC files.
3209 *   Here, there will be no transaction running. We wait for any running
3210 *   transaction to commit.
3211 *
3212 * - Within sys_sync(), kupdate and such.
3213 *   We wait on commit, if told to.
3214 *
3215 * - Within prune_icache() (PF_MEMALLOC == true)
3216 *   Here we simply return.  We can't afford to block kswapd on the
3217 *   journal commit.
3218 *
3219 * In all cases it is actually safe for us to return without doing anything,
3220 * because the inode has been copied into a raw inode buffer in
3221 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3222 * knfsd.
3223 *
3224 * Note that we are absolutely dependent upon all inode dirtiers doing the
3225 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3226 * which we are interested.
3227 *
3228 * It would be a bug for them to not do this.  The code:
3229 *
3230 *	mark_inode_dirty(inode)
3231 *	stuff();
3232 *	inode->i_size = expr;
3233 *
3234 * is in error because a kswapd-driven write_inode() could occur while
3235 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3236 * will no longer be on the superblock's dirty inode list.
3237 */
3238int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
3239{
3240	if (current->flags & PF_MEMALLOC)
3241		return 0;
3242
3243	if (ext3_journal_current_handle()) {
3244		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3245		dump_stack();
3246		return -EIO;
3247	}
3248
3249	if (wbc->sync_mode != WB_SYNC_ALL)
3250		return 0;
3251
3252	return ext3_force_commit(inode->i_sb);
3253}
3254
3255/*
3256 * ext3_setattr()
3257 *
3258 * Called from notify_change.
3259 *
3260 * We want to trap VFS attempts to truncate the file as soon as
3261 * possible.  In particular, we want to make sure that when the VFS
3262 * shrinks i_size, we put the inode on the orphan list and modify
3263 * i_disksize immediately, so that during the subsequent flushing of
3264 * dirty pages and freeing of disk blocks, we can guarantee that any
3265 * commit will leave the blocks being flushed in an unused state on
3266 * disk.  (On recovery, the inode will get truncated and the blocks will
3267 * be freed, so we have a strong guarantee that no future commit will
3268 * leave these blocks visible to the user.)
3269 *
3270 * Called with inode->sem down.
3271 */
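/*
 * A rough sketch of the size-reducing path below (error handling and the
 * uid/gid/quota transfer omitted):
 *
 *	ext3_orphan_add() and i_disksize = attr->ia_size
 *	ext3_block_truncate_page()	zero the new partial tail block
 *	truncate_setsize()		shrink i_size and the page cache
 *	ext3_truncate()			free the blocks past the new size
 *					(and drop the orphan record again)
 */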
3272int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3273{
3274	struct inode *inode = dentry->d_inode;
3275	int error, rc = 0;
3276	const unsigned int ia_valid = attr->ia_valid;
3277
3278	error = inode_change_ok(inode, attr);
3279	if (error)
3280		return error;
3281
3282	if (is_quota_modification(inode, attr))
3283		dquot_initialize(inode);
3284	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3285	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3286		handle_t *handle;
3287
3288		/* (user+group)*(old+new) structure, inode write (sb,
3289		 * inode block, ? - but truncate inode update has it) */
3290		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
3291					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
3292		if (IS_ERR(handle)) {
3293			error = PTR_ERR(handle);
3294			goto err_out;
3295		}
3296		error = dquot_transfer(inode, attr);
3297		if (error) {
3298			ext3_journal_stop(handle);
3299			return error;
3300		}
3301		/* Update corresponding info in inode so that everything is in
3302		 * one transaction */
3303		if (attr->ia_valid & ATTR_UID)
3304			inode->i_uid = attr->ia_uid;
3305		if (attr->ia_valid & ATTR_GID)
3306			inode->i_gid = attr->ia_gid;
3307		error = ext3_mark_inode_dirty(handle, inode);
3308		ext3_journal_stop(handle);
3309	}
3310
3311	if (attr->ia_valid & ATTR_SIZE)
3312		inode_dio_wait(inode);
3313
3314	if (S_ISREG(inode->i_mode) &&
3315	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3316		handle_t *handle;
3317
3318		handle = ext3_journal_start(inode, 3);
3319		if (IS_ERR(handle)) {
3320			error = PTR_ERR(handle);
3321			goto err_out;
3322		}
3323
3324		error = ext3_orphan_add(handle, inode);
3325		if (error) {
3326			ext3_journal_stop(handle);
3327			goto err_out;
3328		}
3329		EXT3_I(inode)->i_disksize = attr->ia_size;
3330		error = ext3_mark_inode_dirty(handle, inode);
3331		ext3_journal_stop(handle);
3332		if (error) {
3333			/* Some hard fs error must have happened. Bail out. */
3334			ext3_orphan_del(NULL, inode);
3335			goto err_out;
3336		}
3337		rc = ext3_block_truncate_page(inode, attr->ia_size);
3338		if (rc) {
3339			/* Cleanup orphan list and exit */
3340			handle = ext3_journal_start(inode, 3);
3341			if (IS_ERR(handle)) {
3342				ext3_orphan_del(NULL, inode);
3343				goto err_out;
3344			}
3345			ext3_orphan_del(handle, inode);
3346			ext3_journal_stop(handle);
3347			goto err_out;
3348		}
3349	}
3350
3351	if ((attr->ia_valid & ATTR_SIZE) &&
3352	    attr->ia_size != i_size_read(inode)) {
3353		truncate_setsize(inode, attr->ia_size);
3354		ext3_truncate(inode);
3355	}
3356
3357	setattr_copy(inode, attr);
3358	mark_inode_dirty(inode);
3359
3360	if (ia_valid & ATTR_MODE)
3361		rc = ext3_acl_chmod(inode);
3362
3363err_out:
3364	ext3_std_error(inode->i_sb, error);
3365	if (!error)
3366		error = rc;
3367	return error;
3368}
3369
3370
3371/*
3372 * How many blocks doth make a writepage()?
3373 *
3374 * With N blocks per page, it may be:
3375 * N data blocks
 3376 * 2 indirect blocks
 3377 * 2 dindirect blocks
3378 * 1 tindirect
3379 * N+5 bitmap blocks (from the above)
3380 * N+5 group descriptor summary blocks
3381 * 1 inode block
3382 * 1 superblock.
 3383 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3384 *
3385 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3386 *
3387 * With ordered or writeback data it's the same, less the N data blocks.
3388 *
3389 * If the inode's direct blocks can hold an integral number of pages then a
3390 * page cannot straddle two indirect blocks, and we can only touch one indirect
3391 * and dindirect block, and the "5" above becomes "3".
3392 *
3393 * This still overestimates under most circumstances.  If we were to pass the
3394 * start and end offsets in here as well we could do block_to_path() on each
3395 * block and work out the exact number of indirects which are touched.  Pah.
3396 */
3397
3398static int ext3_writepage_trans_blocks(struct inode *inode)
3399{
3400	int bpp = ext3_journal_blocks_per_page(inode);
3401	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3402	int ret;
3403
3404	if (ext3_should_journal_data(inode))
3405		ret = 3 * (bpp + indirects) + 2;
3406	else
3407		ret = 2 * (bpp + indirects) + indirects + 2;
3408
3409#ifdef CONFIG_QUOTA
 3410	/* We know that the structure was already allocated during dquot_initialize so
 3411	 * we will be updating only the data blocks + inodes */
3412	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3413#endif
3414
3415	return ret;
3416}
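/*
 * Worked example (illustrative only, assuming a 4KB block size and 4KB
 * pages, so bpp == 1 and EXT3_NDIR_BLOCKS % bpp == 0, i.e. indirects == 3):
 *
 *	data=journal:            3 * (1 + 3) + 2     = 14 credits
 *	data=ordered/writeback:  2 * (1 + 3) + 3 + 2 = 13 credits
 *
 * plus EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) when CONFIG_QUOTA is enabled.
 */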
3417
3418/*
3419 * The caller must have previously called ext3_reserve_inode_write().
 3420 * Given this, we know that the caller already has write access to iloc->bh.
3421 */
3422int ext3_mark_iloc_dirty(handle_t *handle,
3423		struct inode *inode, struct ext3_iloc *iloc)
3424{
3425	int err = 0;
3426
 3427	/* ext3_do_update_inode() consumes one bh->b_count reference */
3428	get_bh(iloc->bh);
3429
3430	/* ext3_do_update_inode() does journal_dirty_metadata */
3431	err = ext3_do_update_inode(handle, inode, iloc);
3432	put_bh(iloc->bh);
3433	return err;
3434}
3435
3436/*
 3437 * On success, we end up with an outstanding reference count against
3438 * iloc->bh.  This _must_ be cleaned up later.
3439 */
3440
3441int
3442ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3443			 struct ext3_iloc *iloc)
3444{
3445	int err = 0;
3446	if (handle) {
3447		err = ext3_get_inode_loc(inode, iloc);
3448		if (!err) {
3449			BUFFER_TRACE(iloc->bh, "get_write_access");
3450			err = ext3_journal_get_write_access(handle, iloc->bh);
3451			if (err) {
3452				brelse(iloc->bh);
3453				iloc->bh = NULL;
3454			}
3455		}
3456	}
3457	ext3_std_error(inode->i_sb, err);
3458	return err;
3459}
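/*
 * Typical usage sketch (illustrative; ext3_mark_inode_dirty() below is the
 * canonical caller of this pair):
 *
 *	struct ext3_iloc iloc;
 *
 *	err = ext3_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the relevant in-core inode fields here ...
 *		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 *
 * The iloc->bh reference taken here is dropped in the course of
 * ext3_mark_iloc_dirty() (ext3_do_update_inode() consumes it).
 */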
3460
3461/*
3462 * What we do here is to mark the in-core inode as clean with respect to inode
3463 * dirtiness (it may still be data-dirty).
3464 * This means that the in-core inode may be reaped by prune_icache
3465 * without having to perform any I/O.  This is a very good thing,
3466 * because *any* task may call prune_icache - even ones which
3467 * have a transaction open against a different journal.
3468 *
3469 * Is this cheating?  Not really.  Sure, we haven't written the
3470 * inode out, but prune_icache isn't a user-visible syncing function.
3471 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3472 * we start and wait on commits.
3473 *
3474 * Is this efficient/effective?  Well, we're being nice to the system
3475 * by cleaning up our inodes proactively so they can be reaped
3476 * without I/O.  But we are potentially leaving up to five seconds'
3477 * worth of inodes floating about which prune_icache wants us to
3478 * write out.  One way to fix that would be to get prune_icache()
 3479 * write out.  One way to fix that would be to get prune_icache()
 3480 * to do a write_super() to free up some memory, which would have the
 3481 * desired effect.
3482int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3483{
3484	struct ext3_iloc iloc;
3485	int err;
3486
3487	might_sleep();
3488	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
3489	err = ext3_reserve_inode_write(handle, inode, &iloc);
3490	if (!err)
3491		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3492	return err;
3493}
3494
3495/*
3496 * ext3_dirty_inode() is called from __mark_inode_dirty()
3497 *
3498 * We're really interested in the case where a file is being extended.
3499 * i_size has been changed by generic_commit_write() and we thus need
3500 * to include the updated inode in the current transaction.
3501 *
3502 * Also, dquot_alloc_space() will always dirty the inode when blocks
3503 * are allocated to the file.
3504 *
3505 * If the inode is marked synchronous, we don't honour that here - doing
3506 * so would cause a commit on atime updates, which we don't bother doing.
3507 * We handle synchronous inodes at the highest possible level.
3508 */
3509void ext3_dirty_inode(struct inode *inode, int flags)
3510{
3511	handle_t *current_handle = ext3_journal_current_handle();
3512	handle_t *handle;
3513
3514	handle = ext3_journal_start(inode, 2);
3515	if (IS_ERR(handle))
3516		goto out;
3517	if (current_handle &&
3518		current_handle->h_transaction != handle->h_transaction) {
3519		/* This task has a transaction open against a different fs */
3520		printk(KERN_EMERG "%s: transactions do not match!\n",
3521		       __func__);
3522	} else {
3523		jbd_debug(5, "marking dirty.  outer handle=%p\n",
3524				current_handle);
3525		ext3_mark_inode_dirty(handle, inode);
3526	}
3527	ext3_journal_stop(handle);
3528out:
3529	return;
3530}
3531
3532#if 0
3533/*
3534 * Bind an inode's backing buffer_head into this transaction, to prevent
3535 * it from being flushed to disk early.  Unlike
3536 * ext3_reserve_inode_write, this leaves behind no bh reference and
3537 * returns no iloc structure, so the caller needs to repeat the iloc
3538 * lookup to mark the inode dirty later.
3539 */
3540static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3541{
3542	struct ext3_iloc iloc;
3543
3544	int err = 0;
3545	if (handle) {
3546		err = ext3_get_inode_loc(inode, &iloc);
3547		if (!err) {
3548			BUFFER_TRACE(iloc.bh, "get_write_access");
3549			err = journal_get_write_access(handle, iloc.bh);
3550			if (!err)
3551				err = ext3_journal_dirty_metadata(handle,
3552								  iloc.bh);
3553			brelse(iloc.bh);
3554		}
3555	}
3556	ext3_std_error(inode->i_sb, err);
3557	return err;
3558}
3559#endif
3560
3561int ext3_change_inode_journal_flag(struct inode *inode, int val)
3562{
3563	journal_t *journal;
3564	handle_t *handle;
3565	int err;
3566
3567	/*
3568	 * We have to be very careful here: changing a data block's
3569	 * journaling status dynamically is dangerous.  If we write a
3570	 * data block to the journal, change the status and then delete
3571	 * that block, we risk forgetting to revoke the old log record
3572	 * from the journal and so a subsequent replay can corrupt data.
3573	 * So, first we make sure that the journal is empty and that
3574	 * nobody is changing anything.
3575	 */
3576
3577	journal = EXT3_JOURNAL(inode);
3578	if (is_journal_aborted(journal))
3579		return -EROFS;
3580
3581	journal_lock_updates(journal);
3582	journal_flush(journal);
3583
3584	/*
3585	 * OK, there are no updates running now, and all cached data is
3586	 * synced to disk.  We are now in a completely consistent state
3587	 * which doesn't have anything in the journal, and we know that
3588	 * no filesystem updates are running, so it is safe to modify
3589	 * the inode's in-core data-journaling state flag now.
3590	 */
3591
3592	if (val)
3593		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3594	else
3595		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3596	ext3_set_aops(inode);
3597
3598	journal_unlock_updates(journal);
3599
3600	/* Finally we can mark the inode as dirty. */
3601
3602	handle = ext3_journal_start(inode, 1);
3603	if (IS_ERR(handle))
3604		return PTR_ERR(handle);
3605
3606	err = ext3_mark_inode_dirty(handle, inode);
3607	handle->h_sync = 1;
3608	ext3_journal_stop(handle);
3609	ext3_std_error(inode->i_sb, err);
3610
3611	return err;
3612}
v3.1
   1/*
   2 *  linux/fs/ext3/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  Goal-directed block allocation by Stephen Tweedie
  16 *	(sct@redhat.com), 1993, 1998
  17 *  Big-endian to little-endian byte-swapping/bitmaps by
  18 *        David S. Miller (davem@caip.rutgers.edu), 1995
  19 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  20 *	(jj@sunsite.ms.mff.cuni.cz)
  21 *
  22 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/fs.h>
  27#include <linux/time.h>
  28#include <linux/ext3_jbd.h>
  29#include <linux/jbd.h>
  30#include <linux/highuid.h>
  31#include <linux/pagemap.h>
  32#include <linux/quotaops.h>
  33#include <linux/string.h>
  34#include <linux/buffer_head.h>
  35#include <linux/writeback.h>
  36#include <linux/mpage.h>
  37#include <linux/uio.h>
  38#include <linux/bio.h>
  39#include <linux/fiemap.h>
  40#include <linux/namei.h>
  41#include <trace/events/ext3.h>
  42#include "xattr.h"
  43#include "acl.h"
  44
  45static int ext3_writepage_trans_blocks(struct inode *inode);
  46static int ext3_block_truncate_page(struct inode *inode, loff_t from);
  47
  48/*
  49 * Test whether an inode is a fast symlink.
  50 */
  51static int ext3_inode_is_fast_symlink(struct inode *inode)
  52{
  53	int ea_blocks = EXT3_I(inode)->i_file_acl ?
  54		(inode->i_sb->s_blocksize >> 9) : 0;
  55
  56	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
  57}
  58
  59/*
  60 * The ext3 forget function must perform a revoke if we are freeing data
  61 * which has been journaled.  Metadata (eg. indirect blocks) must be
  62 * revoked in all cases.
  63 *
  64 * "bh" may be NULL: a metadata block may have been freed from memory
  65 * but there may still be a record of it in the journal, and that record
  66 * still needs to be revoked.
  67 */
  68int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  69			struct buffer_head *bh, ext3_fsblk_t blocknr)
  70{
  71	int err;
  72
  73	might_sleep();
  74
  75	trace_ext3_forget(inode, is_metadata, blocknr);
  76	BUFFER_TRACE(bh, "enter");
  77
  78	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
  79		  "data mode %lx\n",
  80		  bh, is_metadata, inode->i_mode,
  81		  test_opt(inode->i_sb, DATA_FLAGS));
  82
  83	/* Never use the revoke function if we are doing full data
  84	 * journaling: there is no need to, and a V1 superblock won't
  85	 * support it.  Otherwise, only skip the revoke on un-journaled
  86	 * data blocks. */
  87
  88	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
  89	    (!is_metadata && !ext3_should_journal_data(inode))) {
  90		if (bh) {
  91			BUFFER_TRACE(bh, "call journal_forget");
  92			return ext3_journal_forget(handle, bh);
  93		}
  94		return 0;
  95	}
  96
  97	/*
  98	 * data!=journal && (is_metadata || should_journal_data(inode))
  99	 */
 100	BUFFER_TRACE(bh, "call ext3_journal_revoke");
 101	err = ext3_journal_revoke(handle, blocknr, bh);
 102	if (err)
 103		ext3_abort(inode->i_sb, __func__,
 104			   "error %d when attempting revoke", err);
 105	BUFFER_TRACE(bh, "exit");
 106	return err;
 107}
 108
 109/*
 110 * Work out how many blocks we need to proceed with the next chunk of a
 111 * truncate transaction.
 112 */
 113static unsigned long blocks_for_truncate(struct inode *inode)
 114{
 115	unsigned long needed;
 116
 117	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
 118
 119	/* Give ourselves just enough room to cope with inodes in which
 120	 * i_blocks is corrupt: we've seen disk corruptions in the past
 121	 * which resulted in random data in an inode which looked enough
 122	 * like a regular file for ext3 to try to delete it.  Things
 123	 * will go a bit crazy if that happens, but at least we should
 124	 * try not to panic the whole kernel. */
 125	if (needed < 2)
 126		needed = 2;
 127
 128	/* But we need to bound the transaction so we don't overflow the
 129	 * journal. */
 130	if (needed > EXT3_MAX_TRANS_DATA)
 131		needed = EXT3_MAX_TRANS_DATA;
 132
 133	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 134}
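/*
 * Example (illustrative, assuming 4KB blocks, i.e. s_blocksize_bits == 12):
 * a file with i_blocks == 8192 (512-byte units, about 4MB of data) gives
 * needed = 8192 >> 3 == 1024, which is then clamped to EXT3_MAX_TRANS_DATA
 * before the EXT3_DATA_TRANS_BLOCKS(sb) overhead is added.
 */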
 135
 136/*
 137 * Truncate transactions can be complex and absolutely huge.  So we need to
  138 * be able to restart the transaction at a convenient checkpoint to make
 139 * sure we don't overflow the journal.
 140 *
 141 * start_transaction gets us a new handle for a truncate transaction,
 142 * and extend_transaction tries to extend the existing one a bit.  If
 143 * extend fails, we need to propagate the failure up and restart the
 144 * transaction in the top-level truncate loop. --sct
 145 */
 146static handle_t *start_transaction(struct inode *inode)
 147{
 148	handle_t *result;
 149
 150	result = ext3_journal_start(inode, blocks_for_truncate(inode));
 151	if (!IS_ERR(result))
 152		return result;
 153
 154	ext3_std_error(inode->i_sb, PTR_ERR(result));
 155	return result;
 156}
 157
 158/*
 159 * Try to extend this transaction for the purposes of truncation.
 160 *
 161 * Returns 0 if we managed to create more room.  If we can't create more
  162 * room, the transaction must be restarted and we return 1.
 163 */
 164static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 165{
 166	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
 167		return 0;
 168	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
 169		return 0;
 170	return 1;
 171}
 172
 173/*
 174 * Restart the transaction associated with *handle.  This does a commit,
 175 * so before we call here everything must be consistently dirtied against
 176 * this transaction.
 177 */
 178static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
 179{
 180	int ret;
 181
 182	jbd_debug(2, "restarting handle %p\n", handle);
 183	/*
  184	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle.
  185	 * At this moment, get_block can be called only for blocks inside
  186	 * i_size since the page cache has already been dropped and writes are
 187	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
 188	 */
 189	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
 190	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
 191	mutex_lock(&EXT3_I(inode)->truncate_mutex);
 192	return ret;
 193}
 194
 195/*
 196 * Called at inode eviction from icache
 197 */
 198void ext3_evict_inode (struct inode *inode)
 199{
 200	struct ext3_inode_info *ei = EXT3_I(inode);
 201	struct ext3_block_alloc_info *rsv;
 202	handle_t *handle;
 203	int want_delete = 0;
 204
 205	trace_ext3_evict_inode(inode);
 206	if (!inode->i_nlink && !is_bad_inode(inode)) {
 207		dquot_initialize(inode);
 208		want_delete = 1;
 209	}
 210
 211	/*
 212	 * When journalling data dirty buffers are tracked only in the journal.
 213	 * So although mm thinks everything is clean and ready for reaping the
 214	 * inode might still have some pages to write in the running
 215	 * transaction or waiting to be checkpointed. Thus calling
 216	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
 217	 * these buffers can cause data loss. Also even if we did not discard
 218	 * these buffers, we would have no way to find them after the inode
  219	 * is reaped and thus the user could see stale data when trying to read
 220	 * them before the transaction is checkpointed. So be careful and
 221	 * force everything to disk here... We use ei->i_datasync_tid to
 222	 * store the newest transaction containing inode's data.
 223	 *
 224	 * Note that directories do not have this problem because they don't
 225	 * use page cache.
 226	 */
 227	if (inode->i_nlink && ext3_should_journal_data(inode) &&
 228	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
 229		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
 230		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
 231
 232		log_start_commit(journal, commit_tid);
 233		log_wait_commit(journal, commit_tid);
 234		filemap_write_and_wait(&inode->i_data);
 235	}
 236	truncate_inode_pages(&inode->i_data, 0);
 237
 238	ext3_discard_reservation(inode);
 239	rsv = ei->i_block_alloc_info;
 240	ei->i_block_alloc_info = NULL;
 241	if (unlikely(rsv))
 242		kfree(rsv);
 243
 244	if (!want_delete)
 245		goto no_delete;
 246
 247	handle = start_transaction(inode);
 248	if (IS_ERR(handle)) {
 249		/*
 250		 * If we're going to skip the normal cleanup, we still need to
 251		 * make sure that the in-core orphan linked list is properly
 252		 * cleaned up.
 253		 */
 254		ext3_orphan_del(NULL, inode);
 255		goto no_delete;
 256	}
 257
 258	if (IS_SYNC(inode))
 259		handle->h_sync = 1;
 260	inode->i_size = 0;
 261	if (inode->i_blocks)
 262		ext3_truncate(inode);
 263	/*
 264	 * Kill off the orphan record created when the inode lost the last
 265	 * link.  Note that ext3_orphan_del() has to be able to cope with the
 266	 * deletion of a non-existent orphan - ext3_truncate() could
 267	 * have removed the record.
 268	 */
 269	ext3_orphan_del(handle, inode);
 270	ei->i_dtime = get_seconds();
 271
 272	/*
 273	 * One subtle ordering requirement: if anything has gone wrong
 274	 * (transaction abort, IO errors, whatever), then we can still
 275	 * do these next steps (the fs will already have been marked as
 276	 * having errors), but we can't free the inode if the mark_dirty
 277	 * fails.
 278	 */
 279	if (ext3_mark_inode_dirty(handle, inode)) {
 280		/* If that failed, just dquot_drop() and be done with that */
 281		dquot_drop(inode);
 282		end_writeback(inode);
 283	} else {
 284		ext3_xattr_delete_inode(handle, inode);
 285		dquot_free_inode(inode);
 286		dquot_drop(inode);
 287		end_writeback(inode);
 288		ext3_free_inode(handle, inode);
 289	}
 290	ext3_journal_stop(handle);
 291	return;
 292no_delete:
 293	end_writeback(inode);
 294	dquot_drop(inode);
 295}
 296
 297typedef struct {
 298	__le32	*p;
 299	__le32	key;
 300	struct buffer_head *bh;
 301} Indirect;
 302
 303static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 304{
 305	p->key = *(p->p = v);
 306	p->bh = bh;
 307}
 308
 309static int verify_chain(Indirect *from, Indirect *to)
 310{
 311	while (from <= to && from->key == *from->p)
 312		from++;
 313	return (from > to);
 314}
 315
 316/**
 317 *	ext3_block_to_path - parse the block number into array of offsets
 318 *	@inode: inode in question (we are only interested in its superblock)
 319 *	@i_block: block number to be parsed
 320 *	@offsets: array to store the offsets in
 321 *      @boundary: set this non-zero if the referred-to block is likely to be
 322 *             followed (on disk) by an indirect block.
 323 *
  324 *	To store the locations of a file's data ext3 uses a data structure common
  325 *	to UNIX filesystems - a tree of pointers anchored in the inode, with
  326 *	data blocks at the leaves and indirect blocks in intermediate nodes.
 327 *	This function translates the block number into path in that tree -
 328 *	return value is the path length and @offsets[n] is the offset of
  329 *	pointer to the (n+1)th node in the nth one. If @block is out of range
  330 *	(negative or too large), a warning is printed and zero is returned.
 331 *
 332 *	Note: function doesn't find node addresses, so no IO is needed. All
 333 *	we need to know is the capacity of indirect blocks (taken from the
 334 *	inode->i_sb).
 335 */
 336
 337/*
 338 * Portability note: the last comparison (check that we fit into triple
 339 * indirect block) is spelled differently, because otherwise on an
 340 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 341 * if our filesystem had 8Kb blocks. We might use long long, but that would
 342 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 343 * i_block would have to be negative in the very beginning, so we would not
 344 * get there at all.
 345 */
 346
 347static int ext3_block_to_path(struct inode *inode,
 348			long i_block, int offsets[4], int *boundary)
 349{
 350	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
 351	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
 352	const long direct_blocks = EXT3_NDIR_BLOCKS,
 353		indirect_blocks = ptrs,
 354		double_blocks = (1 << (ptrs_bits * 2));
 355	int n = 0;
 356	int final = 0;
 357
 358	if (i_block < 0) {
 359		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
 360	} else if (i_block < direct_blocks) {
 361		offsets[n++] = i_block;
 362		final = direct_blocks;
 363	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
 364		offsets[n++] = EXT3_IND_BLOCK;
 365		offsets[n++] = i_block;
 366		final = ptrs;
 367	} else if ((i_block -= indirect_blocks) < double_blocks) {
 368		offsets[n++] = EXT3_DIND_BLOCK;
 369		offsets[n++] = i_block >> ptrs_bits;
 370		offsets[n++] = i_block & (ptrs - 1);
 371		final = ptrs;
 372	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
 373		offsets[n++] = EXT3_TIND_BLOCK;
 374		offsets[n++] = i_block >> (ptrs_bits * 2);
 375		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
 376		offsets[n++] = i_block & (ptrs - 1);
 377		final = ptrs;
 378	} else {
 379		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
 380	}
 381	if (boundary)
 382		*boundary = final - 1 - (i_block & (ptrs - 1));
 383	return n;
 384}
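/*
 * Worked example (illustrative, assuming 4KB blocks, so ptrs == 1024 and
 * ptrs_bits == 10): for i_block == 20000 we get 20000 - 12 - 1024 == 18964,
 * which is below double_blocks (1 << 20), so the result is
 *
 *	offsets[] = { EXT3_DIND_BLOCK, 18964 >> 10 == 18, 18964 & 1023 == 532 }
 *
 * with a returned depth of 3 and *boundary == 1024 - 1 - 532 == 491.
 */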
 385
 386/**
 387 *	ext3_get_branch - read the chain of indirect blocks leading to data
 388 *	@inode: inode in question
 389 *	@depth: depth of the chain (1 - direct pointer, etc.)
 390 *	@offsets: offsets of pointers in inode/indirect blocks
 391 *	@chain: place to store the result
 392 *	@err: here we store the error value
 393 *
 394 *	Function fills the array of triples <key, p, bh> and returns %NULL
 395 *	if everything went OK or the pointer to the last filled triple
 396 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 397 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 398 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 399 *	number (it points into struct inode for i==0 and into the bh->b_data
 400 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 401 *	block for i>0 and NULL for i==0. In other words, it holds the block
 402 *	numbers of the chain, addresses they were taken from (and where we can
 403 *	verify that chain did not change) and buffer_heads hosting these
 404 *	numbers.
 405 *
 406 *	Function stops when it stumbles upon zero pointer (absent block)
 407 *		(pointer to last triple returned, *@err == 0)
 408 *	or when it gets an IO error reading an indirect block
 409 *		(ditto, *@err == -EIO)
 410 *	or when it notices that chain had been changed while it was reading
 411 *		(ditto, *@err == -EAGAIN)
 412 *	or when it reads all @depth-1 indirect blocks successfully and finds
 413 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 414 */
 415static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
 416				 Indirect chain[4], int *err)
 417{
 418	struct super_block *sb = inode->i_sb;
 419	Indirect *p = chain;
 420	struct buffer_head *bh;
 421
 422	*err = 0;
 423	/* i_data is not going away, no lock needed */
 424	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
 425	if (!p->key)
 426		goto no_block;
 427	while (--depth) {
 428		bh = sb_bread(sb, le32_to_cpu(p->key));
 429		if (!bh)
 430			goto failure;
 431		/* Reader: pointers */
 432		if (!verify_chain(chain, p))
 433			goto changed;
 434		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
 435		/* Reader: end */
 436		if (!p->key)
 437			goto no_block;
 438	}
 439	return NULL;
 440
 441changed:
 442	brelse(bh);
 443	*err = -EAGAIN;
 444	goto no_block;
 445failure:
 446	*err = -EIO;
 447no_block:
 448	return p;
 449}
 450
 451/**
 452 *	ext3_find_near - find a place for allocation with sufficient locality
 453 *	@inode: owner
 454 *	@ind: descriptor of indirect block.
 455 *
 456 *	This function returns the preferred place for block allocation.
  457 *	It is used when the heuristic for sequential allocation fails.
 458 *	Rules are:
 459 *	  + if there is a block to the left of our position - allocate near it.
 460 *	  + if pointer will live in indirect block - allocate near that block.
 461 *	  + if pointer will live in inode - allocate in the same
 462 *	    cylinder group.
 463 *
  464 * In the latter case we colour the starting block by the caller's PID to
 465 * prevent it from clashing with concurrent allocations for a different inode
 466 * in the same block group.   The PID is used here so that functionally related
 467 * files will be close-by on-disk.
 468 *
 469 *	Caller must make sure that @ind is valid and will stay that way.
 470 */
 471static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
 472{
 473	struct ext3_inode_info *ei = EXT3_I(inode);
 474	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
 475	__le32 *p;
 476	ext3_fsblk_t bg_start;
 477	ext3_grpblk_t colour;
 478
 479	/* Try to find previous block */
 480	for (p = ind->p - 1; p >= start; p--) {
 481		if (*p)
 482			return le32_to_cpu(*p);
 483	}
 484
 485	/* No such thing, so let's try location of indirect block */
 486	if (ind->bh)
 487		return ind->bh->b_blocknr;
 488
 489	/*
 490	 * It is going to be referred to from the inode itself? OK, just put it
 491	 * into the same cylinder group then.
 492	 */
 493	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
 494	colour = (current->pid % 16) *
 495			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
 496	return bg_start + colour;
 497}
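/*
 * For illustration: with the common 32768 blocks per group this colouring
 * spreads concurrent allocators across 16 slots of 2048 blocks each within
 * the inode's block group.
 */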
 498
 499/**
 500 *	ext3_find_goal - find a preferred place for allocation.
 501 *	@inode: owner
 502 *	@block:  block we want
 503 *	@partial: pointer to the last triple within a chain
 504 *
  505 *	Normally this function finds the preferred place for block allocation
  506 *	and returns it.
 507 */
 508
 509static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 510				   Indirect *partial)
 511{
 512	struct ext3_block_alloc_info *block_i;
 513
 514	block_i =  EXT3_I(inode)->i_block_alloc_info;
 515
 516	/*
 517	 * try the heuristic for sequential allocation,
 518	 * failing that at least try to get decent locality.
 519	 */
 520	if (block_i && (block == block_i->last_alloc_logical_block + 1)
 521		&& (block_i->last_alloc_physical_block != 0)) {
 522		return block_i->last_alloc_physical_block + 1;
 523	}
 524
 525	return ext3_find_near(inode, partial);
 526}
 527
 528/**
 529 *	ext3_blks_to_allocate - Look up the block map and count the number
  530 *	of direct blocks that need to be allocated for the given branch.
 531 *
 532 *	@branch: chain of indirect blocks
 533 *	@k: number of blocks need for indirect blocks
 534 *	@blks: number of data blocks to be mapped.
 535 *	@blocks_to_boundary:  the offset in the indirect block
 536 *
  537 *	return the total number of blocks to be allocated, including the
 538 *	direct and indirect blocks.
 539 */
 540static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 541		int blocks_to_boundary)
 542{
 543	unsigned long count = 0;
 544
 545	/*
  546	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
  547	 * so clearly the blocks on that path have not been allocated either
 548	 */
 549	if (k > 0) {
 550		/* right now we don't handle cross boundary allocation */
 551		if (blks < blocks_to_boundary + 1)
 552			count += blks;
 553		else
 554			count += blocks_to_boundary + 1;
 555		return count;
 556	}
 557
 558	count++;
 559	while (count < blks && count <= blocks_to_boundary &&
 560		le32_to_cpu(*(branch[0].p + count)) == 0) {
 561		count++;
 562	}
 563	return count;
 564}
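/*
 * Example (illustrative): with k > 0, blks == 8 and blocks_to_boundary == 5
 * this returns 6, since we never allocate past the indirect-block boundary.
 * With k == 0 it simply counts the run of still-unset pointers following
 * branch[0].p, again capped by blks and the boundary.
 */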
 565
 566/**
 567 *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
 568 *	@handle: handle for this transaction
 569 *	@inode: owner
 570 *	@goal: preferred place for allocation
  571 *	@indirect_blks: the number of blocks that need to be allocated for
  572 *			indirect blocks
  573 *	@blks:	number of blocks that need to be allocated for direct blocks
 574 *	@new_blocks: on return it will store the new block numbers for
 575 *	the indirect blocks(if needed) and the first direct block,
 576 *	@err: here we store the error value
 577 *
 578 *	return the number of direct blocks allocated
 579 */
 580static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 581			ext3_fsblk_t goal, int indirect_blks, int blks,
 582			ext3_fsblk_t new_blocks[4], int *err)
 583{
 584	int target, i;
 585	unsigned long count = 0;
 586	int index = 0;
 587	ext3_fsblk_t current_block = 0;
 588	int ret = 0;
 589
 590	/*
 591	 * Here we try to allocate the requested multiple blocks at once,
 592	 * on a best-effort basis.
 593	 * To build a branch, we should allocate blocks for
 594	 * the indirect blocks(if not allocated yet), and at least
 595	 * the first direct block of this branch.  That's the
  596	 * minimum number of blocks we need to allocate (required)
 597	 */
 598	target = blks + indirect_blks;
 599
 600	while (1) {
 601		count = target;
 602		/* allocating blocks for indirect blocks and direct blocks */
 603		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
 604		if (*err)
 605			goto failed_out;
 606
 607		target -= count;
 608		/* allocate blocks for indirect blocks */
 609		while (index < indirect_blks && count) {
 610			new_blocks[index++] = current_block++;
 611			count--;
 612		}
 613
 614		if (count > 0)
 615			break;
 616	}
 617
 618	/* save the new block number for the first direct block */
 619	new_blocks[index] = current_block;
 620
 621	/* total number of blocks allocated for direct blocks */
 622	ret = count;
 623	*err = 0;
 624	return ret;
 625failed_out:
 626	for (i = 0; i <index; i++)
 627		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 628	return ret;
 629}
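/*
 * Example (illustrative): with indirect_blks == 2 and blks == 4, target
 * starts at 6.  If ext3_new_blocks() only finds 3 contiguous blocks, the
 * first two become the missing indirect blocks, the third becomes the
 * first direct block, and the function returns 1 - the caller maps just
 * that one direct block and comes back later for the rest.
 */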
 630
 631/**
 632 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 633 *	@handle: handle for this transaction
 634 *	@inode: owner
 635 *	@indirect_blks: number of allocated indirect blocks
 636 *	@blks: number of allocated direct blocks
 637 *	@goal: preferred place for allocation
 638 *	@offsets: offsets (in the blocks) to store the pointers to next.
 639 *	@branch: place to store the chain in.
 640 *
 641 *	This function allocates blocks, zeroes out all but the last one,
 642 *	links them into chain and (if we are synchronous) writes them to disk.
 643 *	In other words, it prepares a branch that can be spliced onto the
 644 *	inode. It stores the information about that chain in the branch[], in
 645 *	the same format as ext3_get_branch() would do. We are calling it after
 646 *	we had read the existing part of chain and partial points to the last
 647 *	triple of that (one with zero ->key). Upon the exit we have the same
 648 *	picture as after the successful ext3_get_block(), except that in one
 649 *	place chain is disconnected - *branch->p is still zero (we did not
 650 *	set the last link), but branch->key contains the number that should
 651 *	be placed into *branch->p to fill that gap.
 652 *
 653 *	If allocation fails we free all blocks we've allocated (and forget
  654 *	their buffer_heads) and return the error value from the failed
 655 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 656 *	as described above and return 0.
 657 */
 658static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 659			int indirect_blks, int *blks, ext3_fsblk_t goal,
 660			int *offsets, Indirect *branch)
 661{
 662	int blocksize = inode->i_sb->s_blocksize;
 663	int i, n = 0;
 664	int err = 0;
 665	struct buffer_head *bh;
 666	int num;
 667	ext3_fsblk_t new_blocks[4];
 668	ext3_fsblk_t current_block;
 669
 670	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
 671				*blks, new_blocks, &err);
 672	if (err)
 673		return err;
 674
 675	branch[0].key = cpu_to_le32(new_blocks[0]);
 676	/*
 677	 * metadata blocks and data blocks are allocated.
 678	 */
 679	for (n = 1; n <= indirect_blks;  n++) {
 680		/*
 681		 * Get buffer_head for parent block, zero it out
 682		 * and set the pointer to new one, then send
 683		 * parent to disk.
 684		 */
 685		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
 686		branch[n].bh = bh;
 687		lock_buffer(bh);
 688		BUFFER_TRACE(bh, "call get_create_access");
 689		err = ext3_journal_get_create_access(handle, bh);
 690		if (err) {
 691			unlock_buffer(bh);
 692			brelse(bh);
 693			goto failed;
 694		}
 695
 696		memset(bh->b_data, 0, blocksize);
 697		branch[n].p = (__le32 *) bh->b_data + offsets[n];
 698		branch[n].key = cpu_to_le32(new_blocks[n]);
 699		*branch[n].p = branch[n].key;
 700		if ( n == indirect_blks) {
 701			current_block = new_blocks[n];
 702			/*
 703			 * End of chain, update the last new metablock of
 704			 * the chain to point to the new allocated
 705			 * data blocks numbers
 706			 */
 707			for (i=1; i < num; i++)
 708				*(branch[n].p + i) = cpu_to_le32(++current_block);
 709		}
 710		BUFFER_TRACE(bh, "marking uptodate");
 711		set_buffer_uptodate(bh);
 712		unlock_buffer(bh);
 713
 714		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
 715		err = ext3_journal_dirty_metadata(handle, bh);
 716		if (err)
 717			goto failed;
 718	}
 719	*blks = num;
 720	return err;
 721failed:
 722	/* Allocation failed, free what we already allocated */
 723	for (i = 1; i <= n ; i++) {
 724		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 725		ext3_journal_forget(handle, branch[i].bh);
 726	}
 727	for (i = 0; i <indirect_blks; i++)
 728		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 729
 730	ext3_free_blocks(handle, inode, new_blocks[i], num);
 731
 732	return err;
 733}
 734
 735/**
 736 * ext3_splice_branch - splice the allocated branch onto inode.
 737 * @handle: handle for this transaction
 738 * @inode: owner
 739 * @block: (logical) number of block we are adding
 740 * @where: location of missing link
 741 * @num:   number of indirect blocks we are adding
 742 * @blks:  number of direct blocks we are adding
 743 *
 744 * This function fills the missing link and does all housekeeping needed in
 745 * inode (->i_blocks, etc.). In case of success we end up with the full
 746 * chain to new block and return 0.
 747 */
 748static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 749			long block, Indirect *where, int num, int blks)
 750{
 751	int i;
 752	int err = 0;
 753	struct ext3_block_alloc_info *block_i;
 754	ext3_fsblk_t current_block;
 755	struct ext3_inode_info *ei = EXT3_I(inode);
 756
 757	block_i = ei->i_block_alloc_info;
 758	/*
 759	 * If we're splicing into a [td]indirect block (as opposed to the
 760	 * inode) then we need to get write access to the [td]indirect block
 761	 * before the splice.
 762	 */
 763	if (where->bh) {
 764		BUFFER_TRACE(where->bh, "get_write_access");
 765		err = ext3_journal_get_write_access(handle, where->bh);
 766		if (err)
 767			goto err_out;
 768	}
 769	/* That's it */
 770
 771	*where->p = where->key;
 772
 773	/*
  774	 * Update the host buffer_head or inode to point to the just-allocated
  775	 * direct blocks
 776	 */
 777	if (num == 0 && blks > 1) {
 778		current_block = le32_to_cpu(where->key) + 1;
 779		for (i = 1; i < blks; i++)
 780			*(where->p + i ) = cpu_to_le32(current_block++);
 781	}
 782
 783	/*
 784	 * update the most recently allocated logical & physical block
  785	 * in i_block_alloc_info, to help find the proper goal block for the
  786	 * next allocation
 787	 */
 788	if (block_i) {
 789		block_i->last_alloc_logical_block = block + blks - 1;
 790		block_i->last_alloc_physical_block =
 791				le32_to_cpu(where[num].key) + blks - 1;
 792	}
 793
 794	/* We are done with atomic stuff, now do the rest of housekeeping */
 795
 796	inode->i_ctime = CURRENT_TIME_SEC;
 797	ext3_mark_inode_dirty(handle, inode);
 798	/* ext3_mark_inode_dirty already updated i_sync_tid */
 799	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 800
 801	/* had we spliced it onto indirect block? */
 802	if (where->bh) {
 803		/*
 804		 * If we spliced it onto an indirect block, we haven't
 805		 * altered the inode.  Note however that if it is being spliced
 806		 * onto an indirect block at the very end of the file (the
 807		 * file is growing) then we *will* alter the inode to reflect
 808		 * the new i_size.  But that is not done here - it is done in
 809		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
 810		 */
 811		jbd_debug(5, "splicing indirect only\n");
 812		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
 813		err = ext3_journal_dirty_metadata(handle, where->bh);
 814		if (err)
 815			goto err_out;
 816	} else {
 817		/*
 818		 * OK, we spliced it into the inode itself on a direct block.
 819		 * Inode was dirtied above.
 820		 */
 821		jbd_debug(5, "splicing direct\n");
 822	}
 823	return err;
 824
 825err_out:
 826	for (i = 1; i <= num; i++) {
 827		BUFFER_TRACE(where[i].bh, "call journal_forget");
 828		ext3_journal_forget(handle, where[i].bh);
 829		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
 830	}
 831	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
 832
 833	return err;
 834}
 835
 836/*
 837 * Allocation strategy is simple: if we have to allocate something, we will
 838 * have to go the whole way to leaf. So let's do it before attaching anything
 839 * to tree, set linkage between the newborn blocks, write them if sync is
 840 * required, recheck the path, free and repeat if check fails, otherwise
 841 * set the last missing link (that will protect us from any truncate-generated
 842 * removals - all blocks on the path are immune now) and possibly force the
 843 * write on the parent block.
 844 * That has a nice additional property: no special recovery from the failed
 845 * allocations is needed - we simply release blocks and do not touch anything
 846 * reachable from inode.
 847 *
 848 * `handle' can be NULL if create == 0.
 849 *
 850 * The BKL may not be held on entry here.  Be sure to take it early.
 851 * return > 0, # of blocks mapped or allocated.
 852 * return = 0, if plain lookup failed.
 853 * return < 0, error case.
 854 */
 855int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 856		sector_t iblock, unsigned long maxblocks,
 857		struct buffer_head *bh_result,
 858		int create)
 859{
 860	int err = -EIO;
 861	int offsets[4];
 862	Indirect chain[4];
 863	Indirect *partial;
 864	ext3_fsblk_t goal;
 865	int indirect_blks;
 866	int blocks_to_boundary = 0;
 867	int depth;
 868	struct ext3_inode_info *ei = EXT3_I(inode);
 869	int count = 0;
 870	ext3_fsblk_t first_block = 0;
 871
 872
 873	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
 874	J_ASSERT(handle != NULL || create == 0);
 875	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 876
 877	if (depth == 0)
 878		goto out;
 879
 880	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 881
 882	/* Simplest case - block found, no allocation needed */
 883	if (!partial) {
 884		first_block = le32_to_cpu(chain[depth - 1].key);
 885		clear_buffer_new(bh_result);
 886		count++;
 887		/*map more blocks*/
 888		while (count < maxblocks && count <= blocks_to_boundary) {
 889			ext3_fsblk_t blk;
 890
 891			if (!verify_chain(chain, chain + depth - 1)) {
 892				/*
 893				 * Indirect block might be removed by
 894				 * truncate while we were reading it.
 895				 * Handling of that case: forget what we've
 896				 * got now. Flag the err as EAGAIN, so it
 897				 * will reread.
 898				 */
 899				err = -EAGAIN;
 900				count = 0;
 901				break;
 902			}
 903			blk = le32_to_cpu(*(chain[depth-1].p + count));
 904
 905			if (blk == first_block + count)
 906				count++;
 907			else
 908				break;
 909		}
 910		if (err != -EAGAIN)
 911			goto got_it;
 912	}
 913
 914	/* Next simple case - plain lookup or failed read of indirect block */
 915	if (!create || err == -EIO)
 916		goto cleanup;
 917
 918	/*
 919	 * Block out ext3_truncate while we alter the tree
 920	 */
 921	mutex_lock(&ei->truncate_mutex);
 922
 923	/*
 924	 * If the indirect block is missing while we are reading
 925	 * the chain(ext3_get_branch() returns -EAGAIN err), or
 926	 * if the chain has been changed after we grab the semaphore,
 927	 * (either because another process truncated this branch, or
 928	 * another get_block allocated this branch) re-grab the chain to see if
 929	 * the request block has been allocated or not.
 930	 *
 931	 * Since we already block the truncate/other get_block
 932	 * at this point, we will have the current copy of the chain when we
 933	 * splice the branch into the tree.
 934	 */
 935	if (err == -EAGAIN || !verify_chain(chain, partial)) {
 936		while (partial > chain) {
 937			brelse(partial->bh);
 938			partial--;
 939		}
 940		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 941		if (!partial) {
 942			count++;
 943			mutex_unlock(&ei->truncate_mutex);
 944			if (err)
 945				goto cleanup;
 946			clear_buffer_new(bh_result);
 947			goto got_it;
 948		}
 949	}
 950
 951	/*
 952	 * Okay, we need to do block allocation.  Lazily initialize the block
 953	 * allocation info here if necessary
 954	*/
 955	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
 956		ext3_init_block_alloc_info(inode);
 957
 958	goal = ext3_find_goal(inode, iblock, partial);
 959
  960	/* the number of blocks that need to be allocated for [d,t]indirect blocks */
 961	indirect_blks = (chain + depth) - partial - 1;
 962
 963	/*
 964	 * Next look up the indirect map to count the totoal number of
 965	 * direct blocks to allocate for this branch.
 966	 */
 967	count = ext3_blks_to_allocate(partial, indirect_blks,
 968					maxblocks, blocks_to_boundary);
 969	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 970				offsets + (partial - chain), partial);
 971
 972	/*
 973	 * The ext3_splice_branch call will free and forget any buffers
 974	 * on the new chain if there is a failure, but that risks using
 975	 * up transaction credits, especially for bitmaps where the
 976	 * credits cannot be returned.  Can we handle this somehow?  We
 977	 * may need to return -EAGAIN upwards in the worst case.  --sct
 978	 */
 979	if (!err)
 980		err = ext3_splice_branch(handle, inode, iblock,
 981					partial, indirect_blks, count);
 982	mutex_unlock(&ei->truncate_mutex);
 983	if (err)
 984		goto cleanup;
 985
 986	set_buffer_new(bh_result);
 987got_it:
 988	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 989	if (count > blocks_to_boundary)
 990		set_buffer_boundary(bh_result);
 991	err = count;
 992	/* Clean up and exit */
 993	partial = chain + depth - 1;	/* the whole chain */
 994cleanup:
 995	while (partial > chain) {
 996		BUFFER_TRACE(partial->bh, "call brelse");
 997		brelse(partial->bh);
 998		partial--;
 999	}
1000	BUFFER_TRACE(bh_result, "returned");
1001out:
1002	trace_ext3_get_blocks_exit(inode, iblock,
1003				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
1004				   count, err);
1005	return err;
1006}
1007
1008/* Maximum number of blocks we map for direct IO at once. */
1009#define DIO_MAX_BLOCKS 4096
1010/*
1011 * Number of credits we need for writing DIO_MAX_BLOCKS:
1012 * We need sb + group descriptor + bitmap + inode -> 4
1013 * For B blocks with A block pointers per block we need:
1014 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
1015 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
1016 */
1017#define DIO_CREDITS 25
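/*
 * Spelling that out for B == 4096 and A == 256 (1KB block size):
 * 4 + 1 + (4096/256/256 + 2) + (4096/256 + 2) = 4 + 1 + 2 + 18 = 25.
 */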
1018
1019static int ext3_get_block(struct inode *inode, sector_t iblock,
1020			struct buffer_head *bh_result, int create)
1021{
1022	handle_t *handle = ext3_journal_current_handle();
1023	int ret = 0, started = 0;
1024	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1025
1026	if (create && !handle) {	/* Direct IO write... */
1027		if (max_blocks > DIO_MAX_BLOCKS)
1028			max_blocks = DIO_MAX_BLOCKS;
1029		handle = ext3_journal_start(inode, DIO_CREDITS +
1030				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
1031		if (IS_ERR(handle)) {
1032			ret = PTR_ERR(handle);
1033			goto out;
1034		}
1035		started = 1;
1036	}
1037
1038	ret = ext3_get_blocks_handle(handle, inode, iblock,
1039					max_blocks, bh_result, create);
1040	if (ret > 0) {
1041		bh_result->b_size = (ret << inode->i_blkbits);
1042		ret = 0;
1043	}
1044	if (started)
1045		ext3_journal_stop(handle);
1046out:
1047	return ret;
1048}
1049
1050int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1051		u64 start, u64 len)
1052{
1053	return generic_block_fiemap(inode, fieinfo, start, len,
1054				    ext3_get_block);
1055}
1056
1057/*
1058 * `handle' can be NULL if create is zero
1059 */
1060struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
1061				long block, int create, int *errp)
1062{
1063	struct buffer_head dummy;
1064	int fatal = 0, err;
1065
1066	J_ASSERT(handle != NULL || create == 0);
1067
1068	dummy.b_state = 0;
1069	dummy.b_blocknr = -1000;
1070	buffer_trace_init(&dummy.b_history);
1071	err = ext3_get_blocks_handle(handle, inode, block, 1,
1072					&dummy, create);
1073	/*
 1074	 * ext3_get_blocks_handle() returns the number of blocks
 1075	 * mapped, or 0 in the case of a hole.
1076	 */
1077	if (err > 0) {
1078		if (err > 1)
1079			WARN_ON(1);
1080		err = 0;
1081	}
1082	*errp = err;
1083	if (!err && buffer_mapped(&dummy)) {
1084		struct buffer_head *bh;
1085		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1086		if (!bh) {
1087			*errp = -EIO;
1088			goto err;
1089		}
1090		if (buffer_new(&dummy)) {
1091			J_ASSERT(create != 0);
1092			J_ASSERT(handle != NULL);
1093
1094			/*
1095			 * Now that we do not always journal data, we should
1096			 * keep in mind whether this should always journal the
1097			 * new buffer as metadata.  For now, regular file
1098			 * writes use ext3_get_block instead, so it's not a
1099			 * problem.
1100			 */
1101			lock_buffer(bh);
1102			BUFFER_TRACE(bh, "call get_create_access");
1103			fatal = ext3_journal_get_create_access(handle, bh);
1104			if (!fatal && !buffer_uptodate(bh)) {
1105				memset(bh->b_data,0,inode->i_sb->s_blocksize);
1106				set_buffer_uptodate(bh);
1107			}
1108			unlock_buffer(bh);
1109			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1110			err = ext3_journal_dirty_metadata(handle, bh);
1111			if (!fatal)
1112				fatal = err;
1113		} else {
1114			BUFFER_TRACE(bh, "not a new buffer");
1115		}
1116		if (fatal) {
1117			*errp = fatal;
1118			brelse(bh);
1119			bh = NULL;
1120		}
1121		return bh;
1122	}
1123err:
1124	return NULL;
1125}
1126
1127struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1128			       int block, int create, int *err)
1129{
1130	struct buffer_head * bh;
1131
1132	bh = ext3_getblk(handle, inode, block, create, err);
1133	if (!bh)
1134		return bh;
1135	if (buffer_uptodate(bh))
1136		return bh;
1137	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
1138	wait_on_buffer(bh);
1139	if (buffer_uptodate(bh))
1140		return bh;
1141	put_bh(bh);
1142	*err = -EIO;
1143	return NULL;
1144}
1145
1146static int walk_page_buffers(	handle_t *handle,
1147				struct buffer_head *head,
1148				unsigned from,
1149				unsigned to,
1150				int *partial,
1151				int (*fn)(	handle_t *handle,
1152						struct buffer_head *bh))
1153{
1154	struct buffer_head *bh;
1155	unsigned block_start, block_end;
1156	unsigned blocksize = head->b_size;
1157	int err, ret = 0;
1158	struct buffer_head *next;
1159
1160	for (	bh = head, block_start = 0;
1161		ret == 0 && (bh != head || !block_start);
1162		block_start = block_end, bh = next)
1163	{
1164		next = bh->b_this_page;
1165		block_end = block_start + blocksize;
1166		if (block_end <= from || block_start >= to) {
1167			if (partial && !buffer_uptodate(bh))
1168				*partial = 1;
1169			continue;
1170		}
1171		err = (*fn)(handle, bh);
1172		if (!ret)
1173			ret = err;
1174	}
1175	return ret;
1176}
1177
1178/*
1179 * To preserve ordering, it is essential that the hole instantiation and
1180 * the data write be encapsulated in a single transaction.  We cannot
1181 * close off a transaction and start a new one between the ext3_get_block()
1182 * and the commit_write().  So doing the journal_start at the start of
1183 * prepare_write() is the right place.
1184 *
1185 * Also, this function can nest inside ext3_writepage() ->
1186 * block_write_full_page(). In that case, we *know* that ext3_writepage()
1187 * has generated enough buffer credits to do the whole page.  So we won't
1188 * block on the journal in that case, which is good, because the caller may
1189 * be PF_MEMALLOC.
1190 *
1191 * By accident, ext3 can be reentered when a transaction is open via
1192 * quota file writes.  If we were to commit the transaction while thus
1193 * reentered, there can be a deadlock - we would be holding a quota
1194 * lock, and the commit would never complete if another thread had a
1195 * transaction open and was blocking on the quota lock - a ranking
1196 * violation.
1197 *
1198 * So what we do is to rely on the fact that journal_stop/journal_start
1199 * will _not_ run commit under these circumstances because handle->h_ref
1200 * is elevated.  We'll still have enough credits for the tiny quotafile
1201 * write.
1202 */
1203static int do_journal_get_write_access(handle_t *handle,
1204					struct buffer_head *bh)
1205{
1206	int dirty = buffer_dirty(bh);
1207	int ret;
1208
1209	if (!buffer_mapped(bh) || buffer_freed(bh))
1210		return 0;
1211	/*
1212	 * __block_prepare_write() could have dirtied some buffers. Clean
 1213	 * the dirty bit as journal_get_write_access() could complain
1214	 * otherwise about fs integrity issues. Setting of the dirty bit
1215	 * by __block_prepare_write() isn't a real problem here as we clear
1216	 * the bit before releasing a page lock and thus writeback cannot
1217	 * ever write the buffer.
1218	 */
1219	if (dirty)
1220		clear_buffer_dirty(bh);
1221	ret = ext3_journal_get_write_access(handle, bh);
1222	if (!ret && dirty)
1223		ret = ext3_journal_dirty_metadata(handle, bh);
1224	return ret;
1225}
1226
1227/*
1228 * Truncate blocks that were not used by write. We have to truncate the
1229 * pagecache as well so that corresponding buffers get properly unmapped.
1230 */
1231static void ext3_truncate_failed_write(struct inode *inode)
1232{
1233	truncate_inode_pages(inode->i_mapping, inode->i_size);
1234	ext3_truncate(inode);
1235}
1236
1237/*
1238 * Truncate blocks that were not used by direct IO write. We have to zero out
1239 * the last file block as well because direct IO might have written to it.
1240 */
1241static void ext3_truncate_failed_direct_write(struct inode *inode)
1242{
1243	ext3_block_truncate_page(inode, inode->i_size);
1244	ext3_truncate(inode);
1245}
1246
1247static int ext3_write_begin(struct file *file, struct address_space *mapping,
1248				loff_t pos, unsigned len, unsigned flags,
1249				struct page **pagep, void **fsdata)
1250{
1251	struct inode *inode = mapping->host;
1252	int ret;
1253	handle_t *handle;
1254	int retries = 0;
1255	struct page *page;
1256	pgoff_t index;
1257	unsigned from, to;
1258	/* Reserve one block more for addition to orphan list in case
1259	 * we allocate blocks but write fails for some reason */
1260	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
1261
1262	trace_ext3_write_begin(inode, pos, len, flags);
1263
1264	index = pos >> PAGE_CACHE_SHIFT;
1265	from = pos & (PAGE_CACHE_SIZE - 1);
1266	to = from + len;
1267
1268retry:
1269	page = grab_cache_page_write_begin(mapping, index, flags);
1270	if (!page)
1271		return -ENOMEM;
1272	*pagep = page;
1273
1274	handle = ext3_journal_start(inode, needed_blocks);
1275	if (IS_ERR(handle)) {
1276		unlock_page(page);
1277		page_cache_release(page);
1278		ret = PTR_ERR(handle);
1279		goto out;
1280	}
1281	ret = __block_write_begin(page, pos, len, ext3_get_block);
1282	if (ret)
1283		goto write_begin_failed;
1284
1285	if (ext3_should_journal_data(inode)) {
1286		ret = walk_page_buffers(handle, page_buffers(page),
1287				from, to, NULL, do_journal_get_write_access);
1288	}
1289write_begin_failed:
1290	if (ret) {
1291		/*
1292		 * block_write_begin may have instantiated a few blocks
1293		 * outside i_size.  Trim these off again. Don't need
1294		 * i_size_read because we hold i_mutex.
1295		 *
1296		 * Add inode to orphan list in case we crash before truncate
1297		 * finishes. Do this only if ext3_can_truncate() agrees so
1298		 * that orphan processing code is happy.
1299		 */
1300		if (pos + len > inode->i_size && ext3_can_truncate(inode))
1301			ext3_orphan_add(handle, inode);
1302		ext3_journal_stop(handle);
1303		unlock_page(page);
1304		page_cache_release(page);
1305		if (pos + len > inode->i_size)
1306			ext3_truncate_failed_write(inode);
1307	}
1308	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1309		goto retry;
1310out:
1311	return ret;
1312}
1313
1314
1315int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1316{
1317	int err = journal_dirty_data(handle, bh);
1318	if (err)
1319		ext3_journal_abort_handle(__func__, __func__,
1320						bh, handle, err);
1321	return err;
1322}
1323
1324/* For ordered writepage and write_end functions */
1325static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1326{
1327	/*
1328	 * Write could have mapped the buffer but it didn't copy the data in
1329	 * yet. So avoid filing such buffer into a transaction.
1330	 */
1331	if (buffer_mapped(bh) && buffer_uptodate(bh))
1332		return ext3_journal_dirty_data(handle, bh);
1333	return 0;
1334}
1335
1336/* For write_end() in data=journal mode */
1337static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1338{
1339	if (!buffer_mapped(bh) || buffer_freed(bh))
1340		return 0;
1341	set_buffer_uptodate(bh);
1342	return ext3_journal_dirty_metadata(handle, bh);
1343}
1344
1345/*
1346 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
1347 * for the whole page but later we failed to copy the data in. Update inode
1348 * size according to what we managed to copy. The rest is going to be
1349 * truncated in write_end function.
1350 */
1351static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1352{
1353	/* What matters to us is i_disksize. We don't write i_size anywhere */
1354	if (pos + copied > inode->i_size)
1355		i_size_write(inode, pos + copied);
1356	if (pos + copied > EXT3_I(inode)->i_disksize) {
1357		EXT3_I(inode)->i_disksize = pos + copied;
1358		mark_inode_dirty(inode);
1359	}
1360}
1361
1362/*
 1363 * We need to pick up the new inode size which generic_commit_write gave us.
 1364 * `file' can be NULL - eg, when called from page_symlink().
1365 *
1366 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
1367 * buffers are managed internally.
1368 */
1369static int ext3_ordered_write_end(struct file *file,
1370				struct address_space *mapping,
1371				loff_t pos, unsigned len, unsigned copied,
1372				struct page *page, void *fsdata)
1373{
1374	handle_t *handle = ext3_journal_current_handle();
1375	struct inode *inode = file->f_mapping->host;
1376	unsigned from, to;
1377	int ret = 0, ret2;
1378
1379	trace_ext3_ordered_write_end(inode, pos, len, copied);
1380	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1381
1382	from = pos & (PAGE_CACHE_SIZE - 1);
1383	to = from + copied;
1384	ret = walk_page_buffers(handle, page_buffers(page),
1385		from, to, NULL, journal_dirty_data_fn);
1386
1387	if (ret == 0)
1388		update_file_sizes(inode, pos, copied);
1389	/*
1390	 * There may be allocated blocks outside of i_size because
1391	 * we failed to copy some data. Prepare for truncate.
1392	 */
1393	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1394		ext3_orphan_add(handle, inode);
1395	ret2 = ext3_journal_stop(handle);
1396	if (!ret)
1397		ret = ret2;
1398	unlock_page(page);
1399	page_cache_release(page);
1400
1401	if (pos + len > inode->i_size)
1402		ext3_truncate_failed_write(inode);
1403	return ret ? ret : copied;
1404}
1405
1406static int ext3_writeback_write_end(struct file *file,
1407				struct address_space *mapping,
1408				loff_t pos, unsigned len, unsigned copied,
1409				struct page *page, void *fsdata)
1410{
1411	handle_t *handle = ext3_journal_current_handle();
1412	struct inode *inode = file->f_mapping->host;
1413	int ret;
1414
1415	trace_ext3_writeback_write_end(inode, pos, len, copied);
1416	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1417	update_file_sizes(inode, pos, copied);
1418	/*
1419	 * There may be allocated blocks outside of i_size because
1420	 * we failed to copy some data. Prepare for truncate.
1421	 */
1422	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1423		ext3_orphan_add(handle, inode);
1424	ret = ext3_journal_stop(handle);
1425	unlock_page(page);
1426	page_cache_release(page);
1427
1428	if (pos + len > inode->i_size)
1429		ext3_truncate_failed_write(inode);
1430	return ret ? ret : copied;
1431}
1432
1433static int ext3_journalled_write_end(struct file *file,
1434				struct address_space *mapping,
1435				loff_t pos, unsigned len, unsigned copied,
1436				struct page *page, void *fsdata)
1437{
1438	handle_t *handle = ext3_journal_current_handle();
1439	struct inode *inode = mapping->host;
1440	struct ext3_inode_info *ei = EXT3_I(inode);
1441	int ret = 0, ret2;
1442	int partial = 0;
1443	unsigned from, to;
1444
1445	trace_ext3_journalled_write_end(inode, pos, len, copied);
1446	from = pos & (PAGE_CACHE_SIZE - 1);
1447	to = from + len;
1448
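	/*
	 * Short copy: if the page wasn't already uptodate we can keep none of
	 * it; zero the newly allocated buffers beyond the copied bytes so no
	 * stale block contents get journalled, and shrink 'to' accordingly.
	 */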
1449	if (copied < len) {
1450		if (!PageUptodate(page))
1451			copied = 0;
1452		page_zero_new_buffers(page, from + copied, to);
1453		to = from + copied;
1454	}
1455
1456	ret = walk_page_buffers(handle, page_buffers(page), from,
1457				to, &partial, write_end_fn);
1458	if (!partial)
1459		SetPageUptodate(page);
1460
1461	if (pos + copied > inode->i_size)
1462		i_size_write(inode, pos + copied);
1463	/*
1464	 * There may be allocated blocks outside of i_size because
1465	 * we failed to copy some data. Prepare for truncate.
1466	 */
1467	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1468		ext3_orphan_add(handle, inode);
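	/*
	 * Remember that this inode now has journalled data (ext3_bmap() must
	 * flush the journal first) and which transaction a later fdatasync()
	 * has to wait for.
	 */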
1469	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1470	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
1471	if (inode->i_size > ei->i_disksize) {
1472		ei->i_disksize = inode->i_size;
1473		ret2 = ext3_mark_inode_dirty(handle, inode);
1474		if (!ret)
1475			ret = ret2;
1476	}
1477
1478	ret2 = ext3_journal_stop(handle);
1479	if (!ret)
1480		ret = ret2;
1481	unlock_page(page);
1482	page_cache_release(page);
1483
1484	if (pos + len > inode->i_size)
1485		ext3_truncate_failed_write(inode);
1486	return ret ? ret : copied;
1487}
1488
1489/*
1490 * bmap() is special.  It gets used by applications such as lilo and by
1491 * the swapper to find the on-disk block of a specific piece of data.
1492 *
1493 * Naturally, this is dangerous if the block concerned is still in the
1494 * journal.  If somebody makes a swapfile on an ext3 data-journaling
1495 * filesystem and enables swap, then they may get a nasty shock when the
1496 * data getting swapped to that swapfile suddenly gets overwritten by
1497 * the original zeros written out previously to the journal and
1498 * awaiting writeback in the kernel's buffer cache.
1499 *
1500 * So, if we see any bmap calls here on a modified, data-journaled file,
1501 * take extra steps to flush any blocks which might be in the cache.
1502 */
1503static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1504{
1505	struct inode *inode = mapping->host;
1506	journal_t *journal;
1507	int err;
1508
1509	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
1510		/*
1511		 * This is a REALLY heavyweight approach, but the use of
1512		 * bmap on dirty files is expected to be extremely rare:
1513		 * only if we run lilo or swapon on a freshly made file
1514		 * do we expect this to happen.
1515		 *
1516		 * (bmap requires CAP_SYS_RAWIO so this does not
1517		 * represent an unprivileged user DoS attack --- we'd be
1518		 * in trouble if mortal users could trigger this path at
1519		 * will.)
1520		 *
1521		 * NB. EXT3_STATE_JDATA is not set on files other than
1522		 * regular files.  If somebody wants to bmap a directory
1523		 * or symlink and gets confused because the buffer
1524		 * hasn't yet been flushed to disk, they deserve
1525		 * everything they get.
1526		 */
1527
1528		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
1529		journal = EXT3_JOURNAL(inode);
1530		journal_lock_updates(journal);
1531		err = journal_flush(journal);
1532		journal_unlock_updates(journal);
1533
1534		if (err)
1535			return 0;
1536	}
1537
1538	return generic_block_bmap(mapping,block,ext3_get_block);
1539}
1540
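/*
 * Trivial walk_page_buffers() callbacks: pin/unpin each buffer_head, or
 * report whether a buffer still lacks a mapping to a disk block.
 */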
1541static int bget_one(handle_t *handle, struct buffer_head *bh)
1542{
1543	get_bh(bh);
1544	return 0;
1545}
1546
1547static int bput_one(handle_t *handle, struct buffer_head *bh)
1548{
1549	put_bh(bh);
1550	return 0;
1551}
1552
1553static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1554{
1555	return !buffer_mapped(bh);
1556}
1557
1558/*
1559 * Note that we always start a transaction even if we're not journalling
1560 * data.  This is to preserve ordering: any hole instantiation within
1561 * __block_write_full_page -> ext3_get_block() should be journalled
1562 * along with the data so we don't crash and then get metadata which
1563 * refers to old data.
1564 *
1565 * In all journalling modes block_write_full_page() will start the I/O.
1566 *
1567 * Problem:
1568 *
1569 *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1570 *		ext3_writepage()
1571 *
1572 * Similar for:
1573 *
1574 *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1575 *
1576 * Same applies to ext3_get_block().  We will deadlock on various things like
1577 * lock_journal and i_truncate_mutex.
1578 *
1579 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1580 * allocations fail.
1581 *
1582 * 16May01: If we're reentered then journal_current_handle() will be
1583 *	    non-zero. We simply *return*.
1584 *
1585 * 1 July 2001: @@@ FIXME:
1586 *   In journalled data mode, a data buffer may be metadata against the
1587 *   current transaction.  But the same file is part of a shared mapping
1588 *   and someone does a writepage() on it.
1589 *
1590 *   We will move the buffer onto the async_data list, but *after* it has
1591 *   been dirtied. So there's a small window where we have dirty data on
1592 *   BJ_Metadata.
1593 *
1594 *   Note that this only applies to the last partial page in the file - the
1595 *   part for which block_write_full_page() uses prepare/commit.  (That's
1596 *   broken code anyway: it's wrong for msync()).
1597 *
1598 *   It's a rare case: affects the final partial page, for journalled data
1599 *   where the file is subject to both write() and writepage() in the same
1600 *   transaction.  To fix it we'll need a custom block_write_full_page().
1601 *   We'll probably need that anyway for journalling writepage() output.
1602 *
1603 * We don't honour synchronous mounts for writepage().  That would be
1604 * disastrous.  Any write() or metadata operation will sync the fs for
1605 * us.
1606 *
1607 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1608 * we don't need to open a transaction here.
1609 */
1610static int ext3_ordered_writepage(struct page *page,
1611				struct writeback_control *wbc)
1612{
1613	struct inode *inode = page->mapping->host;
1614	struct buffer_head *page_bufs;
1615	handle_t *handle = NULL;
1616	int ret = 0;
1617	int err;
1618
1619	J_ASSERT(PageLocked(page));
1620	WARN_ON_ONCE(IS_RDONLY(inode));
1621
1622	/*
1623	 * We give up here if we're reentered, because it might be for a
1624	 * different filesystem.
1625	 */
1626	if (ext3_journal_current_handle())
1627		goto out_fail;
1628
1629	trace_ext3_ordered_writepage(page);
1630	if (!page_has_buffers(page)) {
1631		create_empty_buffers(page, inode->i_sb->s_blocksize,
1632				(1 << BH_Dirty)|(1 << BH_Uptodate));
1633		page_bufs = page_buffers(page);
1634	} else {
1635		page_bufs = page_buffers(page);
1636		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
1637				       NULL, buffer_unmapped)) {
1638			/* Provide NULL get_block() to catch bugs if buffers
1639			 * weren't really mapped */
1640			return block_write_full_page(page, NULL, wbc);
1641		}
1642	}
1643	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1644
1645	if (IS_ERR(handle)) {
1646		ret = PTR_ERR(handle);
1647		goto out_fail;
1648	}
1649
1650	walk_page_buffers(handle, page_bufs, 0,
1651			PAGE_CACHE_SIZE, NULL, bget_one);
1652
1653	ret = block_write_full_page(page, ext3_get_block, wbc);
1654
1655	/*
1656	 * The page can become unlocked at any point now, and
1657	 * truncate can then come in and change things.  So we
1658	 * can't touch *page from now on.  But *page_bufs is
1659	 * safe due to elevated refcount.
1660	 */
1661
1662	/*
1663	 * And attach them to the current transaction.  But only if
1664	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
1665	 * and generally junk.
1666	 */
1667	if (ret == 0) {
1668		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1669					NULL, journal_dirty_data_fn);
1670		if (!ret)
1671			ret = err;
1672	}
1673	walk_page_buffers(handle, page_bufs, 0,
1674			PAGE_CACHE_SIZE, NULL, bput_one);
1675	err = ext3_journal_stop(handle);
1676	if (!ret)
1677		ret = err;
1678	return ret;
1679
1680out_fail:
1681	redirty_page_for_writepage(wbc, page);
1682	unlock_page(page);
1683	return ret;
1684}
1685
1686static int ext3_writeback_writepage(struct page *page,
1687				struct writeback_control *wbc)
1688{
1689	struct inode *inode = page->mapping->host;
1690	handle_t *handle = NULL;
1691	int ret = 0;
1692	int err;
1693
1694	J_ASSERT(PageLocked(page));
1695	WARN_ON_ONCE(IS_RDONLY(inode));
1696
1697	if (ext3_journal_current_handle())
1698		goto out_fail;
1699
1700	trace_ext3_writeback_writepage(page);
1701	if (page_has_buffers(page)) {
1702		if (!walk_page_buffers(NULL, page_buffers(page), 0,
1703				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
1704			/* Provide NULL get_block() to catch bugs if buffers
1705			 * weren't really mapped */
1706			return block_write_full_page(page, NULL, wbc);
1707		}
1708	}
1709
1710	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1711	if (IS_ERR(handle)) {
1712		ret = PTR_ERR(handle);
1713		goto out_fail;
1714	}
1715
1716	ret = block_write_full_page(page, ext3_get_block, wbc);
1717
1718	err = ext3_journal_stop(handle);
1719	if (!ret)
1720		ret = err;
1721	return ret;
1722
1723out_fail:
1724	redirty_page_for_writepage(wbc, page);
1725	unlock_page(page);
1726	return ret;
1727}
1728
1729static int ext3_journalled_writepage(struct page *page,
1730				struct writeback_control *wbc)
1731{
1732	struct inode *inode = page->mapping->host;
1733	handle_t *handle = NULL;
1734	int ret = 0;
1735	int err;
1736
1737	J_ASSERT(PageLocked(page));
1738	WARN_ON_ONCE(IS_RDONLY(inode));
1739
1740	if (ext3_journal_current_handle())
1741		goto no_write;
1742
1743	trace_ext3_journalled_writepage(page);
1744	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1745	if (IS_ERR(handle)) {
1746		ret = PTR_ERR(handle);
1747		goto no_write;
1748	}
1749
1750	if (!page_has_buffers(page) || PageChecked(page)) {
1751		/*
1752		 * It's mmapped pagecache.  Add buffers and journal it.  There
1753		 * doesn't seem much point in redirtying the page here.
1754		 */
1755		ClearPageChecked(page);
1756		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
1757					  ext3_get_block);
1758		if (ret != 0) {
1759			ext3_journal_stop(handle);
1760			goto out_unlock;
1761		}
1762		ret = walk_page_buffers(handle, page_buffers(page), 0,
1763			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1764
1765		err = walk_page_buffers(handle, page_buffers(page), 0,
1766				PAGE_CACHE_SIZE, NULL, write_end_fn);
1767		if (ret == 0)
1768			ret = err;
1769		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1770		atomic_set(&EXT3_I(inode)->i_datasync_tid,
1771			   handle->h_transaction->t_tid);
1772		unlock_page(page);
1773	} else {
1774		/*
1775		 * It may be a page full of checkpoint-mode buffers.  We don't
1776		 * really know unless we go poke around in the buffer_heads.
1777		 * But block_write_full_page will do the right thing.
1778		 */
1779		ret = block_write_full_page(page, ext3_get_block, wbc);
1780	}
1781	err = ext3_journal_stop(handle);
1782	if (!ret)
1783		ret = err;
1784out:
1785	return ret;
1786
1787no_write:
1788	redirty_page_for_writepage(wbc, page);
1789out_unlock:
1790	unlock_page(page);
1791	goto out;
1792}
1793
1794static int ext3_readpage(struct file *file, struct page *page)
1795{
1796	trace_ext3_readpage(page);
1797	return mpage_readpage(page, ext3_get_block);
1798}
1799
1800static int
1801ext3_readpages(struct file *file, struct address_space *mapping,
1802		struct list_head *pages, unsigned nr_pages)
1803{
1804	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1805}
1806
1807static void ext3_invalidatepage(struct page *page, unsigned long offset)
1808{
1809	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1810
1811	trace_ext3_invalidatepage(page, offset);
1812
1813	/*
1814	 * If it's a full truncate we just forget about the pending dirtying
1815	 */
1816	if (offset == 0)
1817		ClearPageChecked(page);
1818
1819	journal_invalidatepage(journal, page, offset);
1820}
1821
1822static int ext3_releasepage(struct page *page, gfp_t wait)
1823{
1824	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1825
1826	trace_ext3_releasepage(page);
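	/*
	 * A Checked page still has a pending "dirty in journalled mode" mark
	 * (see ext3_journalled_set_page_dirty) that writepage has not yet
	 * propagated to its buffers, so it should not be released.
	 */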
1827	WARN_ON(PageChecked(page));
1828	if (!page_has_buffers(page))
1829		return 0;
1830	return journal_try_to_free_buffers(journal, page, wait);
1831}
1832
1833/*
1834 * If the O_DIRECT write will extend the file then add this inode to the
1835 * orphan list.  So recovery will truncate it back to the original size
1836 * if the machine crashes during the write.
1837 *
1838 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1839 * crashes then stale disk data _may_ be exposed inside the file. But current
1840 * VFS code falls back into buffered path in that case so we are safe.
1841 */
1842static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1843			const struct iovec *iov, loff_t offset,
1844			unsigned long nr_segs)
1845{
1846	struct file *file = iocb->ki_filp;
1847	struct inode *inode = file->f_mapping->host;
1848	struct ext3_inode_info *ei = EXT3_I(inode);
1849	handle_t *handle;
1850	ssize_t ret;
1851	int orphan = 0;
1852	size_t count = iov_length(iov, nr_segs);
1853	int retries = 0;
1854
1855	trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
1856
1857	if (rw == WRITE) {
1858		loff_t final_size = offset + count;
1859
1860		if (final_size > inode->i_size) {
1861			/* Credits for sb + inode write */
1862			handle = ext3_journal_start(inode, 2);
1863			if (IS_ERR(handle)) {
1864				ret = PTR_ERR(handle);
1865				goto out;
1866			}
1867			ret = ext3_orphan_add(handle, inode);
1868			if (ret) {
1869				ext3_journal_stop(handle);
1870				goto out;
1871			}
1872			orphan = 1;
1873			ei->i_disksize = inode->i_size;
1874			ext3_journal_stop(handle);
1875		}
1876	}
1877
1878retry:
1879	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
1880				 ext3_get_block);
1881	/*
1882	 * In case of error extending write may have instantiated a few
1883	 * blocks outside i_size. Trim these off again.
1884	 */
1885	if (unlikely((rw & WRITE) && ret < 0)) {
1886		loff_t isize = i_size_read(inode);
1887		loff_t end = offset + iov_length(iov, nr_segs);
1888
1889		if (end > isize)
1890			ext3_truncate_failed_direct_write(inode);
1891	}
1892	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1893		goto retry;
1894
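	/*
	 * The direct I/O is finished (or failed): drop the orphan record we
	 * added above and, if the file actually grew, push i_size and
	 * i_disksize out to cover the bytes that were written.
	 */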
1895	if (orphan) {
1896		int err;
1897
1898		/* Credits for sb + inode write */
1899		handle = ext3_journal_start(inode, 2);
1900		if (IS_ERR(handle)) {
1901			/* This is really bad luck. We've written the data
1902			 * but cannot extend i_size. Truncate allocated blocks
1903			 * and pretend the write failed... */
1904			ext3_truncate_failed_direct_write(inode);
1905			ret = PTR_ERR(handle);
1906			goto out;
1907		}
1908		if (inode->i_nlink)
1909			ext3_orphan_del(handle, inode);
1910		if (ret > 0) {
1911			loff_t end = offset + ret;
1912			if (end > inode->i_size) {
1913				ei->i_disksize = end;
1914				i_size_write(inode, end);
1915				/*
1916				 * We're going to return a positive `ret'
1917				 * here due to non-zero-length I/O, so there's
1918				 * no way of reporting error returns from
1919				 * ext3_mark_inode_dirty() to userspace.  So
1920				 * ignore it.
1921				 */
1922				ext3_mark_inode_dirty(handle, inode);
1923			}
1924		}
1925		err = ext3_journal_stop(handle);
1926		if (ret == 0)
1927			ret = err;
1928	}
1929out:
1930	trace_ext3_direct_IO_exit(inode, offset,
1931				iov_length(iov, nr_segs), rw, ret);
1932	return ret;
1933}
1934
1935/*
1936 * Pages can be marked dirty completely asynchronously from ext3's journalling
1937 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1938 * much here because ->set_page_dirty is called under VFS locks.  The page is
1939 * not necessarily locked.
1940 *
1941 * We cannot just dirty the page and leave attached buffers clean, because the
1942 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1943 * or jbddirty because all the journalling code will explode.
1944 *
1945 * So what we do is to mark the page "pending dirty" and next time writepage
1946 * is called, propagate that into the buffers appropriately.
1947 */
1948static int ext3_journalled_set_page_dirty(struct page *page)
1949{
1950	SetPageChecked(page);
1951	return __set_page_dirty_nobuffers(page);
1952}
1953
1954static const struct address_space_operations ext3_ordered_aops = {
1955	.readpage		= ext3_readpage,
1956	.readpages		= ext3_readpages,
1957	.writepage		= ext3_ordered_writepage,
1958	.write_begin		= ext3_write_begin,
1959	.write_end		= ext3_ordered_write_end,
1960	.bmap			= ext3_bmap,
1961	.invalidatepage		= ext3_invalidatepage,
1962	.releasepage		= ext3_releasepage,
1963	.direct_IO		= ext3_direct_IO,
1964	.migratepage		= buffer_migrate_page,
1965	.is_partially_uptodate  = block_is_partially_uptodate,
1966	.error_remove_page	= generic_error_remove_page,
1967};
1968
1969static const struct address_space_operations ext3_writeback_aops = {
1970	.readpage		= ext3_readpage,
1971	.readpages		= ext3_readpages,
1972	.writepage		= ext3_writeback_writepage,
1973	.write_begin		= ext3_write_begin,
1974	.write_end		= ext3_writeback_write_end,
1975	.bmap			= ext3_bmap,
1976	.invalidatepage		= ext3_invalidatepage,
1977	.releasepage		= ext3_releasepage,
1978	.direct_IO		= ext3_direct_IO,
1979	.migratepage		= buffer_migrate_page,
1980	.is_partially_uptodate  = block_is_partially_uptodate,
1981	.error_remove_page	= generic_error_remove_page,
1982};
1983
1984static const struct address_space_operations ext3_journalled_aops = {
1985	.readpage		= ext3_readpage,
1986	.readpages		= ext3_readpages,
1987	.writepage		= ext3_journalled_writepage,
1988	.write_begin		= ext3_write_begin,
1989	.write_end		= ext3_journalled_write_end,
1990	.set_page_dirty		= ext3_journalled_set_page_dirty,
1991	.bmap			= ext3_bmap,
1992	.invalidatepage		= ext3_invalidatepage,
1993	.releasepage		= ext3_releasepage,
1994	.is_partially_uptodate  = block_is_partially_uptodate,
1995	.error_remove_page	= generic_error_remove_page,
1996};
1997
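/*
 * Pick the address_space operations matching the inode's data journalling
 * mode: ordered, writeback, or full data journalling.
 */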
1998void ext3_set_aops(struct inode *inode)
1999{
2000	if (ext3_should_order_data(inode))
2001		inode->i_mapping->a_ops = &ext3_ordered_aops;
2002	else if (ext3_should_writeback_data(inode))
2003		inode->i_mapping->a_ops = &ext3_writeback_aops;
2004	else
2005		inode->i_mapping->a_ops = &ext3_journalled_aops;
2006}
2007
2008/*
2009 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
2010 * up to the end of the block which corresponds to `from'.
2011 * This is required during truncate. We need to physically zero the tail end
2012 * of that block so it doesn't yield old data if the file is later grown.
2013 */
2014static int ext3_block_truncate_page(struct inode *inode, loff_t from)
2015{
2016	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2017	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2018	unsigned blocksize, iblock, length, pos;
2019	struct page *page;
2020	handle_t *handle = NULL;
2021	struct buffer_head *bh;
2022	int err = 0;
2023
2024	/* Truncated on block boundary - nothing to do */
2025	blocksize = inode->i_sb->s_blocksize;
2026	if ((from & (blocksize - 1)) == 0)
2027		return 0;
2028
2029	page = grab_cache_page(inode->i_mapping, index);
2030	if (!page)
2031		return -ENOMEM;
2032	length = blocksize - (offset & (blocksize - 1));
2033	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2034
2035	if (!page_has_buffers(page))
2036		create_empty_buffers(page, blocksize, 0);
2037
2038	/* Find the buffer that contains "offset" */
2039	bh = page_buffers(page);
2040	pos = blocksize;
2041	while (offset >= pos) {
2042		bh = bh->b_this_page;
2043		iblock++;
2044		pos += blocksize;
2045	}
2046
2047	err = 0;
2048	if (buffer_freed(bh)) {
2049		BUFFER_TRACE(bh, "freed: skip");
2050		goto unlock;
2051	}
2052
2053	if (!buffer_mapped(bh)) {
2054		BUFFER_TRACE(bh, "unmapped");
2055		ext3_get_block(inode, iblock, bh, 0);
2056		/* unmapped? It's a hole - nothing to do */
2057		if (!buffer_mapped(bh)) {
2058			BUFFER_TRACE(bh, "still unmapped");
2059			goto unlock;
2060		}
2061	}
2062
2063	/* Ok, it's mapped. Make sure it's up-to-date */
2064	if (PageUptodate(page))
2065		set_buffer_uptodate(bh);
2066
2067	if (!buffer_uptodate(bh)) {
2068		err = -EIO;
2069		ll_rw_block(READ, 1, &bh);
2070		wait_on_buffer(bh);
2071		/* Uhhuh. Read error. Complain and punt. */
2072		if (!buffer_uptodate(bh))
2073			goto unlock;
2074	}
2075
2076	/* data=writeback mode doesn't need transaction to zero-out data */
2077	if (!ext3_should_writeback_data(inode)) {
2078		/* We journal at most one block */
2079		handle = ext3_journal_start(inode, 1);
2080		if (IS_ERR(handle)) {
2081			clear_highpage(page);
2082			flush_dcache_page(page);
2083			err = PTR_ERR(handle);
2084			goto unlock;
2085		}
2086	}
2087
2088	if (ext3_should_journal_data(inode)) {
2089		BUFFER_TRACE(bh, "get write access");
2090		err = ext3_journal_get_write_access(handle, bh);
2091		if (err)
2092			goto stop;
2093	}
2094
2095	zero_user(page, offset, length);
2096	BUFFER_TRACE(bh, "zeroed end of block");
2097
2098	err = 0;
2099	if (ext3_should_journal_data(inode)) {
2100		err = ext3_journal_dirty_metadata(handle, bh);
2101	} else {
2102		if (ext3_should_order_data(inode))
2103			err = ext3_journal_dirty_data(handle, bh);
2104		mark_buffer_dirty(bh);
2105	}
2106stop:
2107	if (handle)
2108		ext3_journal_stop(handle);
2109
2110unlock:
2111	unlock_page(page);
2112	page_cache_release(page);
2113	return err;
2114}
2115
2116/*
2117 * Probably it should be a library function... search for first non-zero word
2118 * or memcmp with zero_page, whatever is better for particular architecture.
2119 * Linus?
2120 */
2121static inline int all_zeroes(__le32 *p, __le32 *q)
2122{
2123	while (p < q)
2124		if (*p++)
2125			return 0;
2126	return 1;
2127}
2128
2129/**
2130 *	ext3_find_shared - find the indirect blocks for partial truncation.
2131 *	@inode:	  inode in question
2132 *	@depth:	  depth of the affected branch
2133 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
2134 *	@chain:	  place to store the pointers to partial indirect blocks
2135 *	@top:	  place to the (detached) top of branch
2136 *
2137 *	This is a helper function used by ext3_truncate().
2138 *
2139 *	When we do truncate() we may have to clean the ends of several
2140 *	indirect blocks but leave the blocks themselves alive. Block is
2141 *	partially truncated if some data below the new i_size is referred
2142 *	from it (and it is on the path to the first completely truncated
2143 *	data block, indeed).  We have to free the top of that path along
2144 *	with everything to the right of the path. Since no allocation
2145 *	past the truncation point is possible until ext3_truncate()
2146 *	finishes, we may safely do the latter, but top of branch may
2147 *	require special attention - pageout below the truncation point
2148 *	might try to populate it.
2149 *
2150 *	We atomically detach the top of branch from the tree, store the
2151 *	block number of its root in *@top, pointers to buffer_heads of
2152 *	partially truncated blocks - in @chain[].bh and pointers to
2153 *	their last elements that should not be removed - in
2154 *	@chain[].p. Return value is the pointer to last filled element
2155 *	of @chain.
2156 *
2157 *	The work left to the caller is to do the actual freeing of subtrees:
2158 *		a) free the subtree starting from *@top
2159 *		b) free the subtrees whose roots are stored in
2160 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2161 *		c) free the subtrees growing from the inode past the @chain[0].
2162 *			(no partially truncated stuff there).  */
2163
2164static Indirect *ext3_find_shared(struct inode *inode, int depth,
2165			int offsets[4], Indirect chain[4], __le32 *top)
2166{
2167	Indirect *partial, *p;
2168	int k, err;
2169
2170	*top = 0;
2171	/* Make k index the deepest non-null offset + 1 */
2172	for (k = depth; k > 1 && !offsets[k-1]; k--)
2173		;
2174	partial = ext3_get_branch(inode, k, offsets, chain, &err);
2175	/* Writer: pointers */
2176	if (!partial)
2177		partial = chain + k-1;
2178	/*
2179	 * If the branch acquired continuation since we've looked at it -
2180	 * fine, it should all survive and (new) top doesn't belong to us.
2181	 */
2182	if (!partial->key && *partial->p)
2183		/* Writer: end */
2184		goto no_top;
2185	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2186		;
2187	/*
2188	 * OK, we've found the last block that must survive. The rest of our
2189	 * branch should be detached before unlocking. However, if that rest
2190	 * of branch is all ours and does not grow immediately from the inode
2191	 * it's easier to cheat and just decrement partial->p.
2192	 */
2193	if (p == chain + k - 1 && p > chain) {
2194		p->p--;
2195	} else {
2196		*top = *p->p;
2197		/* Nope, don't do this in ext3.  Must leave the tree intact */
2198#if 0
2199		*p->p = 0;
2200#endif
2201	}
2202	/* Writer: end */
2203
2204	while(partial > p) {
2205		brelse(partial->bh);
2206		partial--;
2207	}
2208no_top:
2209	return partial;
2210}
2211
2212/*
2213 * Zero a number of block pointers in either an inode or an indirect block.
2214 * If we restart the transaction we must again get write access to the
2215 * indirect block for further modification.
2216 *
2217 * We release `count' blocks on disk, but (last - first) may be greater
2218 * than `count' because there can be holes in there.
2219 */
2220static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2221		struct buffer_head *bh, ext3_fsblk_t block_to_free,
2222		unsigned long count, __le32 *first, __le32 *last)
2223{
2224	__le32 *p;
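	/*
	 * If the handle is running low on credits and cannot be extended,
	 * push the updates made so far into the journal, restart the handle
	 * and re-take write access to the indirect block being modified.
	 */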
2225	if (try_to_extend_transaction(handle, inode)) {
2226		if (bh) {
2227			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2228			if (ext3_journal_dirty_metadata(handle, bh))
2229				return;
2230		}
2231		ext3_mark_inode_dirty(handle, inode);
2232		truncate_restart_transaction(handle, inode);
2233		if (bh) {
2234			BUFFER_TRACE(bh, "retaking write access");
2235			if (ext3_journal_get_write_access(handle, bh))
2236				return;
2237		}
2238	}
2239
2240	/*
2241	 * Any buffers which are on the journal will be in memory. We find
2242	 * them on the hash table so journal_revoke() will run journal_forget()
2243	 * on them.  We've already detached each block from the file, so
2244	 * bforget() in journal_forget() should be safe.
2245	 *
2246	 * AKPM: turn on bforget in journal_forget()!!!
2247	 */
2248	for (p = first; p < last; p++) {
2249		u32 nr = le32_to_cpu(*p);
2250		if (nr) {
2251			struct buffer_head *bh;
2252
2253			*p = 0;
2254			bh = sb_find_get_block(inode->i_sb, nr);
2255			ext3_forget(handle, 0, inode, bh, nr);
2256		}
2257	}
2258
2259	ext3_free_blocks(handle, inode, block_to_free, count);
2260}
2261
2262/**
2263 * ext3_free_data - free a list of data blocks
2264 * @handle:	handle for this transaction
2265 * @inode:	inode we are dealing with
2266 * @this_bh:	indirect buffer_head which contains *@first and *@last
2267 * @first:	array of block numbers
2268 * @last:	points immediately past the end of array
2269 *
2270 * We are freeing all blocks referred from that array (numbers are stored as
2271 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2272 *
2273 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2274 * blocks are contiguous then releasing them at one time will only affect one
2275 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2276 * actually use a lot of journal space.
2277 *
2278 * @this_bh will be %NULL if @first and @last point into the inode's direct
2279 * block pointers.
2280 */
2281static void ext3_free_data(handle_t *handle, struct inode *inode,
2282			   struct buffer_head *this_bh,
2283			   __le32 *first, __le32 *last)
2284{
2285	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2286	unsigned long count = 0;	    /* Number of blocks in the run */
2287	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
2288					       corresponding to
2289					       block_to_free */
2290	ext3_fsblk_t nr;		    /* Current block # */
2291	__le32 *p;			    /* Pointer into inode/ind
2292					       for current block */
2293	int err;
2294
2295	if (this_bh) {				/* For indirect block */
2296		BUFFER_TRACE(this_bh, "get_write_access");
2297		err = ext3_journal_get_write_access(handle, this_bh);
2298		/* Important: if we can't update the indirect pointers
2299		 * to the blocks, we can't free them. */
2300		if (err)
2301			return;
2302	}
2303
2304	for (p = first; p < last; p++) {
2305		nr = le32_to_cpu(*p);
2306		if (nr) {
2307			/* accumulate blocks to free if they're contiguous */
2308			if (count == 0) {
2309				block_to_free = nr;
2310				block_to_free_p = p;
2311				count = 1;
2312			} else if (nr == block_to_free + count) {
2313				count++;
2314			} else {
2315				ext3_clear_blocks(handle, inode, this_bh,
2316						  block_to_free,
2317						  count, block_to_free_p, p);
2318				block_to_free = nr;
2319				block_to_free_p = p;
2320				count = 1;
2321			}
2322		}
2323	}
2324
2325	if (count > 0)
2326		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2327				  count, block_to_free_p, p);
2328
2329	if (this_bh) {
2330		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2331
2332		/*
2333		 * The buffer head should have an attached journal head at this
2334		 * point. However, if the data is corrupted and an indirect
2335		 * block pointed to itself, it would have been detached when
2336		 * the block was cleared. Check for this instead of OOPSing.
2337		 */
2338		if (bh2jh(this_bh))
2339			ext3_journal_dirty_metadata(handle, this_bh);
2340		else
2341			ext3_error(inode->i_sb, "ext3_free_data",
2342				   "circular indirect block detected, "
2343				   "inode=%lu, block=%llu",
2344				   inode->i_ino,
2345				   (unsigned long long)this_bh->b_blocknr);
2346	}
2347}
2348
2349/**
2350 *	ext3_free_branches - free an array of branches
2351 *	@handle: JBD handle for this transaction
2352 *	@inode:	inode we are dealing with
2353 *	@parent_bh: the buffer_head which contains *@first and *@last
2354 *	@first:	array of block numbers
2355 *	@last:	pointer immediately past the end of array
2356 *	@depth:	depth of the branches to free
2357 *
2358 *	We are freeing all blocks referred from these branches (numbers are
2359 *	stored as little-endian 32-bit) and updating @inode->i_blocks
2360 *	appropriately.
2361 */
2362static void ext3_free_branches(handle_t *handle, struct inode *inode,
2363			       struct buffer_head *parent_bh,
2364			       __le32 *first, __le32 *last, int depth)
2365{
2366	ext3_fsblk_t nr;
2367	__le32 *p;
2368
2369	if (is_handle_aborted(handle))
2370		return;
2371
2372	if (depth--) {
2373		struct buffer_head *bh;
2374		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2375		p = last;
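		/*
		 * Walk the block pointers from the end of the array back to
		 * the start, freeing each child subtree bottom-up.
		 */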
2376		while (--p >= first) {
2377			nr = le32_to_cpu(*p);
2378			if (!nr)
2379				continue;		/* A hole */
2380
2381			/* Go read the buffer for the next level down */
2382			bh = sb_bread(inode->i_sb, nr);
2383
2384			/*
2385			 * A read failure? Report error and clear slot
2386			 * (should be rare).
2387			 */
2388			if (!bh) {
2389				ext3_error(inode->i_sb, "ext3_free_branches",
2390					   "Read failure, inode=%lu, block="E3FSBLK,
2391					   inode->i_ino, nr);
2392				continue;
2393			}
2394
2395			/* This zaps the entire block.  Bottom up. */
2396			BUFFER_TRACE(bh, "free child branches");
2397			ext3_free_branches(handle, inode, bh,
2398					   (__le32*)bh->b_data,
2399					   (__le32*)bh->b_data + addr_per_block,
2400					   depth);
2401
2402			/*
2403			 * Everything below this pointer has been
2404			 * released.  Now let this top-of-subtree go.
2405			 *
2406			 * We want the freeing of this indirect block to be
2407			 * atomic in the journal with the updating of the
2408			 * bitmap block which owns it.  So make some room in
2409			 * the journal.
2410			 *
2411			 * We zero the parent pointer *after* freeing its
2412			 * pointee in the bitmaps, so if extend_transaction()
2413			 * for some reason fails to put the bitmap changes and
2414			 * the release into the same transaction, recovery
2415			 * will merely complain about releasing a free block,
2416			 * rather than leaking blocks.
2417			 */
2418			if (is_handle_aborted(handle))
2419				return;
2420			if (try_to_extend_transaction(handle, inode)) {
2421				ext3_mark_inode_dirty(handle, inode);
2422				truncate_restart_transaction(handle, inode);
2423			}
2424
2425			/*
2426			 * We've probably journalled the indirect block several
2427			 * times during the truncate.  But it's no longer
2428			 * needed and we now drop it from the transaction via
2429			 * journal_revoke().
2430			 *
2431			 * That's easy if it's exclusively part of this
2432			 * transaction.  But if it's part of the committing
2433			 * transaction then journal_forget() will simply
2434			 * brelse() it.  That means that if the underlying
2435			 * block is reallocated in ext3_get_block(),
2436			 * unmap_underlying_metadata() will find this block
2437			 * and will try to get rid of it.  damn, damn. Thus
2438			 * we don't allow a block to be reallocated until
2439			 * a transaction freeing it has fully committed.
2440			 *
2441			 * We also have to make sure journal replay after a
2442			 * crash does not overwrite non-journaled data blocks
2443			 * with old metadata when the block got reallocated for
2444			 * data.  Thus we have to store a revoke record for a
2445			 * block in the same transaction in which we free the
2446			 * block.
2447			 */
2448			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2449
2450			ext3_free_blocks(handle, inode, nr, 1);
2451
2452			if (parent_bh) {
2453				/*
2454				 * The block which we have just freed is
2455				 * pointed to by an indirect block: journal it
2456				 */
2457				BUFFER_TRACE(parent_bh, "get_write_access");
2458				if (!ext3_journal_get_write_access(handle,
2459								   parent_bh)){
2460					*p = 0;
2461					BUFFER_TRACE(parent_bh,
2462					"call ext3_journal_dirty_metadata");
2463					ext3_journal_dirty_metadata(handle,
2464								    parent_bh);
2465				}
2466			}
2467		}
2468	} else {
2469		/* We have reached the bottom of the tree. */
2470		BUFFER_TRACE(parent_bh, "free data blocks");
2471		ext3_free_data(handle, inode, parent_bh, first, last);
2472	}
2473}
2474
2475int ext3_can_truncate(struct inode *inode)
2476{
2477	if (S_ISREG(inode->i_mode))
2478		return 1;
2479	if (S_ISDIR(inode->i_mode))
2480		return 1;
2481	if (S_ISLNK(inode->i_mode))
2482		return !ext3_inode_is_fast_symlink(inode);
2483	return 0;
2484}
2485
2486/*
2487 * ext3_truncate()
2488 *
2489 * We block out ext3_get_block() block instantiations across the entire
2490 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2491 * simultaneously on behalf of the same inode.
2492 *
2493 * As we work through the truncate and commit bits of it to the journal there
2494 * is one core, guiding principle: the file's tree must always be consistent on
2495 * disk.  We must be able to restart the truncate after a crash.
2496 *
2497 * The file's tree may be transiently inconsistent in memory (although it
2498 * probably isn't), but whenever we close off and commit a journal transaction,
2499 * the contents of (the filesystem + the journal) must be consistent and
2500 * restartable.  It's pretty simple, really: bottom up, right to left (although
2501 * left-to-right works OK too).
2502 *
2503 * Note that at recovery time, journal replay occurs *before* the restart of
2504 * truncate against the orphan inode list.
2505 *
2506 * The committed inode has the new, desired i_size (which is the same as
2507 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2508 * that this inode's truncate did not complete and it will again call
2509 * ext3_truncate() to have another go.  So there will be instantiated blocks
2510 * to the right of the truncation point in a crashed ext3 filesystem.  But
2511 * that's fine - as long as they are linked from the inode, the post-crash
2512 * ext3_truncate() run will find them and release them.
2513 */
2514void ext3_truncate(struct inode *inode)
2515{
2516	handle_t *handle;
2517	struct ext3_inode_info *ei = EXT3_I(inode);
2518	__le32 *i_data = ei->i_data;
2519	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2520	int offsets[4];
2521	Indirect chain[4];
2522	Indirect *partial;
2523	__le32 nr = 0;
2524	int n;
2525	long last_block;
2526	unsigned blocksize = inode->i_sb->s_blocksize;
2527
2528	trace_ext3_truncate_enter(inode);
2529
2530	if (!ext3_can_truncate(inode))
2531		goto out_notrans;
2532
2533	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
2534		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
2535
2536	handle = start_transaction(inode);
2537	if (IS_ERR(handle))
2538		goto out_notrans;
2539
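	/*
	 * last_block is the index of the first file block lying entirely
	 * beyond the new i_size; everything from there on gets freed.
	 */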
2540	last_block = (inode->i_size + blocksize-1)
2541					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2542	n = ext3_block_to_path(inode, last_block, offsets, NULL);
2543	if (n == 0)
2544		goto out_stop;	/* error */
2545
2546	/*
2547	 * OK.  This truncate is going to happen.  We add the inode to the
2548	 * orphan list, so that if this truncate spans multiple transactions,
2549	 * and we crash, we will resume the truncate when the filesystem
2550	 * recovers.  It also marks the inode dirty, to catch the new size.
2551	 *
2552	 * Implication: the file must always be in a sane, consistent
2553	 * truncatable state while each transaction commits.
2554	 */
2555	if (ext3_orphan_add(handle, inode))
2556		goto out_stop;
2557
2558	/*
2559	 * The orphan list entry will now protect us from any crash which
2560	 * occurs before the truncate completes, so it is now safe to propagate
2561	 * the new, shorter inode size (held for now in i_size) into the
2562	 * on-disk inode. We do this via i_disksize, which is the value which
2563	 * ext3 *really* writes onto the disk inode.
2564	 */
2565	ei->i_disksize = inode->i_size;
2566
2567	/*
2568	 * From here we block out all ext3_get_block() callers who want to
2569	 * modify the block allocation tree.
2570	 */
2571	mutex_lock(&ei->truncate_mutex);
2572
2573	if (n == 1) {		/* direct blocks */
2574		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2575			       i_data + EXT3_NDIR_BLOCKS);
2576		goto do_indirects;
2577	}
2578
2579	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2580	/* Kill the top of shared branch (not detached) */
2581	if (nr) {
2582		if (partial == chain) {
2583			/* Shared branch grows from the inode */
2584			ext3_free_branches(handle, inode, NULL,
2585					   &nr, &nr+1, (chain+n-1) - partial);
2586			*partial->p = 0;
2587			/*
2588			 * We mark the inode dirty prior to restart,
2589			 * and prior to stop.  No need for it here.
2590			 */
2591		} else {
2592			/* Shared branch grows from an indirect block */
2593			ext3_free_branches(handle, inode, partial->bh,
2594					partial->p,
2595					partial->p+1, (chain+n-1) - partial);
2596		}
2597	}
2598	/* Clear the ends of indirect blocks on the shared branch */
2599	while (partial > chain) {
2600		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2601				   (__le32*)partial->bh->b_data+addr_per_block,
2602				   (chain+n-1) - partial);
2603		BUFFER_TRACE(partial->bh, "call brelse");
2604		brelse (partial->bh);
2605		partial--;
2606	}
2607do_indirects:
2608	/* Kill the remaining (whole) subtrees */
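	/*
	 * No break statements: each case falls through so that every indirect
	 * tree deeper than the one containing the new EOF is freed whole.
	 */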
2609	switch (offsets[0]) {
2610	default:
2611		nr = i_data[EXT3_IND_BLOCK];
2612		if (nr) {
2613			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2614			i_data[EXT3_IND_BLOCK] = 0;
2615		}
2616	case EXT3_IND_BLOCK:
2617		nr = i_data[EXT3_DIND_BLOCK];
2618		if (nr) {
2619			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2620			i_data[EXT3_DIND_BLOCK] = 0;
2621		}
2622	case EXT3_DIND_BLOCK:
2623		nr = i_data[EXT3_TIND_BLOCK];
2624		if (nr) {
2625			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2626			i_data[EXT3_TIND_BLOCK] = 0;
2627		}
2628	case EXT3_TIND_BLOCK:
2629		;
2630	}
2631
2632	ext3_discard_reservation(inode);
2633
2634	mutex_unlock(&ei->truncate_mutex);
2635	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2636	ext3_mark_inode_dirty(handle, inode);
2637
2638	/*
2639	 * In a multi-transaction truncate, we only make the final transaction
2640	 * synchronous
2641	 */
2642	if (IS_SYNC(inode))
2643		handle->h_sync = 1;
2644out_stop:
2645	/*
2646	 * If this was a simple ftruncate(), and the file will remain alive
2647	 * then we need to clear up the orphan record which we created above.
2648	 * However, if this was a real unlink then we were called by
2649	 * ext3_evict_inode(), and we allow that function to clean up the
2650	 * orphan info for us.
2651	 */
2652	if (inode->i_nlink)
2653		ext3_orphan_del(handle, inode);
2654
2655	ext3_journal_stop(handle);
2656	trace_ext3_truncate_exit(inode);
2657	return;
2658out_notrans:
2659	/*
2660	 * Delete the inode from the orphan list so that it doesn't stay there
2661	 * forever and trigger assertion on umount.
2662	 */
2663	if (inode->i_nlink)
2664		ext3_orphan_del(NULL, inode);
2665	trace_ext3_truncate_exit(inode);
2666}
2667
2668static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2669		unsigned long ino, struct ext3_iloc *iloc)
2670{
2671	unsigned long block_group;
2672	unsigned long offset;
2673	ext3_fsblk_t block;
2674	struct ext3_group_desc *gdp;
2675
2676	if (!ext3_valid_inum(sb, ino)) {
2677		/*
2678		 * This error is already checked for in namei.c unless we are
2679		 * looking at an NFS filehandle, in which case no error
2680		 * report is needed
2681		 */
2682		return 0;
2683	}
2684
2685	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2686	gdp = ext3_get_group_desc(sb, block_group, NULL);
2687	if (!gdp)
2688		return 0;
2689	/*
2690	 * Figure out the offset within the block group inode table
2691	 */
2692	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2693		EXT3_INODE_SIZE(sb);
2694	block = le32_to_cpu(gdp->bg_inode_table) +
2695		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
2696
2697	iloc->block_group = block_group;
2698	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2699	return block;
2700}
2701
2702/*
2703 * ext3_get_inode_loc returns with an extra refcount against the inode's
2704 * underlying buffer_head on success. If 'in_mem' is true, we have all
2705 * data in memory that is needed to recreate the on-disk version of this
2706 * inode.
2707 */
2708static int __ext3_get_inode_loc(struct inode *inode,
2709				struct ext3_iloc *iloc, int in_mem)
2710{
2711	ext3_fsblk_t block;
2712	struct buffer_head *bh;
2713
2714	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2715	if (!block)
2716		return -EIO;
2717
2718	bh = sb_getblk(inode->i_sb, block);
2719	if (!bh) {
2720		ext3_error (inode->i_sb, "ext3_get_inode_loc",
2721				"unable to read inode block - "
2722				"inode=%lu, block="E3FSBLK,
2723				 inode->i_ino, block);
2724		return -EIO;
2725	}
2726	if (!buffer_uptodate(bh)) {
2727		lock_buffer(bh);
2728
2729		/*
2730		 * If the buffer has the write error flag, we have failed
2731		 * to write out another inode in the same block.  In this
2732		 * case, we don't have to read the block because we may
2733		 * read the old inode data successfully.
2734		 */
2735		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2736			set_buffer_uptodate(bh);
2737
2738		if (buffer_uptodate(bh)) {
2739			/* someone brought it uptodate while we waited */
2740			unlock_buffer(bh);
2741			goto has_buffer;
2742		}
2743
2744		/*
2745		 * If we have all information of the inode in memory and this
2746		 * is the only valid inode in the block, we need not read the
2747		 * block.
2748		 */
2749		if (in_mem) {
2750			struct buffer_head *bitmap_bh;
2751			struct ext3_group_desc *desc;
2752			int inodes_per_buffer;
2753			int inode_offset, i;
2754			int block_group;
2755			int start;
2756
2757			block_group = (inode->i_ino - 1) /
2758					EXT3_INODES_PER_GROUP(inode->i_sb);
2759			inodes_per_buffer = bh->b_size /
2760				EXT3_INODE_SIZE(inode->i_sb);
2761			inode_offset = ((inode->i_ino - 1) %
2762					EXT3_INODES_PER_GROUP(inode->i_sb));
2763			start = inode_offset & ~(inodes_per_buffer - 1);
2764
2765			/* Is the inode bitmap in cache? */
2766			desc = ext3_get_group_desc(inode->i_sb,
2767						block_group, NULL);
2768			if (!desc)
2769				goto make_io;
2770
2771			bitmap_bh = sb_getblk(inode->i_sb,
2772					le32_to_cpu(desc->bg_inode_bitmap));
2773			if (!bitmap_bh)
2774				goto make_io;
2775
2776			/*
2777			 * If the inode bitmap isn't in cache then the
2778			 * optimisation may end up performing two reads instead
2779			 * of one, so skip it.
2780			 */
2781			if (!buffer_uptodate(bitmap_bh)) {
2782				brelse(bitmap_bh);
2783				goto make_io;
2784			}
2785			for (i = start; i < start + inodes_per_buffer; i++) {
2786				if (i == inode_offset)
2787					continue;
2788				if (ext3_test_bit(i, bitmap_bh->b_data))
2789					break;
2790			}
2791			brelse(bitmap_bh);
2792			if (i == start + inodes_per_buffer) {
2793				/* all other inodes are free, so skip I/O */
2794				memset(bh->b_data, 0, bh->b_size);
2795				set_buffer_uptodate(bh);
2796				unlock_buffer(bh);
2797				goto has_buffer;
2798			}
2799		}
2800
2801make_io:
2802		/*
2803		 * There are other valid inodes in the buffer, this inode
2804		 * has in-inode xattrs, or we don't have this inode in memory.
2805		 * Read the block from disk.
2806		 */
2807		trace_ext3_load_inode(inode);
2808		get_bh(bh);
2809		bh->b_end_io = end_buffer_read_sync;
2810		submit_bh(READ | REQ_META | REQ_PRIO, bh);
2811		wait_on_buffer(bh);
2812		if (!buffer_uptodate(bh)) {
2813			ext3_error(inode->i_sb, "ext3_get_inode_loc",
2814					"unable to read inode block - "
2815					"inode=%lu, block="E3FSBLK,
2816					inode->i_ino, block);
2817			brelse(bh);
2818			return -EIO;
2819		}
2820	}
2821has_buffer:
2822	iloc->bh = bh;
2823	return 0;
2824}
2825
2826int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2827{
2828	/* We have all inode data except xattrs in memory here. */
2829	return __ext3_get_inode_loc(inode, iloc,
2830		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
2831}
2832
2833void ext3_set_inode_flags(struct inode *inode)
2834{
2835	unsigned int flags = EXT3_I(inode)->i_flags;
2836
2837	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2838	if (flags & EXT3_SYNC_FL)
2839		inode->i_flags |= S_SYNC;
2840	if (flags & EXT3_APPEND_FL)
2841		inode->i_flags |= S_APPEND;
2842	if (flags & EXT3_IMMUTABLE_FL)
2843		inode->i_flags |= S_IMMUTABLE;
2844	if (flags & EXT3_NOATIME_FL)
2845		inode->i_flags |= S_NOATIME;
2846	if (flags & EXT3_DIRSYNC_FL)
2847		inode->i_flags |= S_DIRSYNC;
2848}
2849
2850/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2851void ext3_get_inode_flags(struct ext3_inode_info *ei)
2852{
2853	unsigned int flags = ei->vfs_inode.i_flags;
2854
2855	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2856			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2857	if (flags & S_SYNC)
2858		ei->i_flags |= EXT3_SYNC_FL;
2859	if (flags & S_APPEND)
2860		ei->i_flags |= EXT3_APPEND_FL;
2861	if (flags & S_IMMUTABLE)
2862		ei->i_flags |= EXT3_IMMUTABLE_FL;
2863	if (flags & S_NOATIME)
2864		ei->i_flags |= EXT3_NOATIME_FL;
2865	if (flags & S_DIRSYNC)
2866		ei->i_flags |= EXT3_DIRSYNC_FL;
2867}
2868
2869struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2870{
2871	struct ext3_iloc iloc;
2872	struct ext3_inode *raw_inode;
2873	struct ext3_inode_info *ei;
2874	struct buffer_head *bh;
2875	struct inode *inode;
2876	journal_t *journal = EXT3_SB(sb)->s_journal;
2877	transaction_t *transaction;
2878	long ret;
2879	int block;
2880
2881	inode = iget_locked(sb, ino);
2882	if (!inode)
2883		return ERR_PTR(-ENOMEM);
2884	if (!(inode->i_state & I_NEW))
2885		return inode;
2886
2887	ei = EXT3_I(inode);
2888	ei->i_block_alloc_info = NULL;
2889
2890	ret = __ext3_get_inode_loc(inode, &iloc, 0);
2891	if (ret < 0)
2892		goto bad_inode;
2893	bh = iloc.bh;
2894	raw_inode = ext3_raw_inode(&iloc);
2895	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2896	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2897	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2898	if(!(test_opt (inode->i_sb, NO_UID32))) {
2899		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2900		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2901	}
2902	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2903	inode->i_size = le32_to_cpu(raw_inode->i_size);
2904	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2905	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2906	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2907	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2908
2909	ei->i_state_flags = 0;
2910	ei->i_dir_start_lookup = 0;
2911	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2912	/* We now have enough fields to check if the inode was active or not.
2913	 * This is needed because nfsd might try to access dead inodes;
2914	 * the test is the same one that e2fsck uses.
2915	 * NeilBrown 1999oct15
2916	 */
2917	if (inode->i_nlink == 0) {
2918		if (inode->i_mode == 0 ||
2919		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2920			/* this inode is deleted */
2921			brelse (bh);
2922			ret = -ESTALE;
2923			goto bad_inode;
2924		}
2925		/* The only unlinked inodes we let through here have
2926		 * valid i_mode and are being read by the orphan
2927		 * recovery code: that's fine, we're about to complete
2928		 * the process of deleting those. */
2929	}
2930	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2931	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2932#ifdef EXT3_FRAGMENTS
2933	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2934	ei->i_frag_no = raw_inode->i_frag;
2935	ei->i_frag_size = raw_inode->i_fsize;
2936#endif
2937	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2938	if (!S_ISREG(inode->i_mode)) {
2939		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2940	} else {
2941		inode->i_size |=
2942			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2943	}
2944	ei->i_disksize = inode->i_size;
2945	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2946	ei->i_block_group = iloc.block_group;
2947	/*
2948	 * NOTE! The in-memory inode i_data array is in little-endian order
2949	 * even on big-endian machines: we do NOT byteswap the block numbers!
2950	 */
2951	for (block = 0; block < EXT3_N_BLOCKS; block++)
2952		ei->i_data[block] = raw_inode->i_block[block];
2953	INIT_LIST_HEAD(&ei->i_orphan);
2954
2955	/*
2956	 * Set transaction id's of transactions that have to be committed
2957	 * to finish f[data]sync. We set them to currently running transaction
2958	 * as we cannot be sure that the inode or some of its metadata isn't
2959	 * part of the transaction - the inode could have been reclaimed and
2960	 * now it is reread from disk.
2961	 */
2962	if (journal) {
2963		tid_t tid;
2964
2965		spin_lock(&journal->j_state_lock);
2966		if (journal->j_running_transaction)
2967			transaction = journal->j_running_transaction;
2968		else
2969			transaction = journal->j_committing_transaction;
2970		if (transaction)
2971			tid = transaction->t_tid;
2972		else
2973			tid = journal->j_commit_sequence;
2974		spin_unlock(&journal->j_state_lock);
2975		atomic_set(&ei->i_sync_tid, tid);
2976		atomic_set(&ei->i_datasync_tid, tid);
2977	}
2978
2979	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2980	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2981		/*
2982		 * When mke2fs creates big inodes it does not zero out
2983		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2984		 * so ignore those first few inodes.
2985		 */
2986		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2987		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2988		    EXT3_INODE_SIZE(inode->i_sb)) {
2989			brelse (bh);
2990			ret = -EIO;
2991			goto bad_inode;
2992		}
2993		if (ei->i_extra_isize == 0) {
2994			/* The extra space is currently unused. Use it. */
2995			ei->i_extra_isize = sizeof(struct ext3_inode) -
2996					    EXT3_GOOD_OLD_INODE_SIZE;
2997		} else {
2998			__le32 *magic = (void *)raw_inode +
2999					EXT3_GOOD_OLD_INODE_SIZE +
3000					ei->i_extra_isize;
3001			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
3002				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
3003		}
3004	} else
3005		ei->i_extra_isize = 0;
3006
3007	if (S_ISREG(inode->i_mode)) {
3008		inode->i_op = &ext3_file_inode_operations;
3009		inode->i_fop = &ext3_file_operations;
3010		ext3_set_aops(inode);
3011	} else if (S_ISDIR(inode->i_mode)) {
3012		inode->i_op = &ext3_dir_inode_operations;
3013		inode->i_fop = &ext3_dir_operations;
3014	} else if (S_ISLNK(inode->i_mode)) {
3015		if (ext3_inode_is_fast_symlink(inode)) {
3016			inode->i_op = &ext3_fast_symlink_inode_operations;
3017			nd_terminate_link(ei->i_data, inode->i_size,
3018				sizeof(ei->i_data) - 1);
3019		} else {
3020			inode->i_op = &ext3_symlink_inode_operations;
3021			ext3_set_aops(inode);
3022		}
3023	} else {
3024		inode->i_op = &ext3_special_inode_operations;
3025		if (raw_inode->i_block[0])
3026			init_special_inode(inode, inode->i_mode,
3027			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3028		else
3029			init_special_inode(inode, inode->i_mode,
3030			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3031	}
3032	brelse (iloc.bh);
3033	ext3_set_inode_flags(inode);
3034	unlock_new_inode(inode);
3035	return inode;
3036
3037bad_inode:
3038	iget_failed(inode);
3039	return ERR_PTR(ret);
3040}
3041
3042/*
3043 * Post the struct inode info into an on-disk inode location in the
3044 * buffer-cache.  This gobbles the caller's reference to the
3045 * buffer_head in the inode location struct.
3046 *
3047 * The caller must have write access to iloc->bh.
3048 */
3049static int ext3_do_update_inode(handle_t *handle,
3050				struct inode *inode,
3051				struct ext3_iloc *iloc)
3052{
3053	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
3054	struct ext3_inode_info *ei = EXT3_I(inode);
3055	struct buffer_head *bh = iloc->bh;
3056	int err = 0, rc, block;
3057
3058again:
3059	/* we can't allow multiple procs in here at once, it's a bit racy */
3060	lock_buffer(bh);
3061
3062	/* For fields not tracked in the in-memory inode,
3063	 * initialise them to zero for new inodes. */
3064	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
3065		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
3066
3067	ext3_get_inode_flags(ei);
3068	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3069	if(!(test_opt(inode->i_sb, NO_UID32))) {
3070		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
3071		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
3072/*
3073 * Fix up interoperability with old kernels. Otherwise, old inodes get
3074 * re-used with the upper 16 bits of the uid/gid intact
3075 */
3076		if (!ei->i_dtime) {
3077			raw_inode->i_uid_high =
3078				cpu_to_le16(high_16_bits(inode->i_uid));
3079			raw_inode->i_gid_high =
3080				cpu_to_le16(high_16_bits(inode->i_gid));
3081		} else {
3082			raw_inode->i_uid_high = 0;
3083			raw_inode->i_gid_high = 0;
3084		}
3085	} else {
3086		raw_inode->i_uid_low =
3087			cpu_to_le16(fs_high2lowuid(inode->i_uid));
3088		raw_inode->i_gid_low =
3089			cpu_to_le16(fs_high2lowgid(inode->i_gid));
3090		raw_inode->i_uid_high = 0;
3091		raw_inode->i_gid_high = 0;
3092	}
3093	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
3094	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
3095	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
3096	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
3097	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
3098	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
3099	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
3100	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
3101#ifdef EXT3_FRAGMENTS
3102	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
3103	raw_inode->i_frag = ei->i_frag_no;
3104	raw_inode->i_fsize = ei->i_frag_size;
3105#endif
3106	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
3107	if (!S_ISREG(inode->i_mode)) {
3108		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
3109	} else {
3110		raw_inode->i_size_high =
3111			cpu_to_le32(ei->i_disksize >> 32);
3112		if (ei->i_disksize > 0x7fffffffULL) {
3113			struct super_block *sb = inode->i_sb;
3114			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
3115					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
3116			    EXT3_SB(sb)->s_es->s_rev_level ==
3117					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
3118			       /* If this is the first large file
3119				* created, add a flag to the superblock.
3120				*/
3121				unlock_buffer(bh);
3122				err = ext3_journal_get_write_access(handle,
3123						EXT3_SB(sb)->s_sbh);
3124				if (err)
3125					goto out_brelse;
3126
3127				ext3_update_dynamic_rev(sb);
3128				EXT3_SET_RO_COMPAT_FEATURE(sb,
3129					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
3130				handle->h_sync = 1;
3131				err = ext3_journal_dirty_metadata(handle,
3132						EXT3_SB(sb)->s_sbh);
3133				/* get our lock and start over */
3134				goto again;
3135			}
3136		}
3137	}
3138	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3139	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3140		if (old_valid_dev(inode->i_rdev)) {
3141			raw_inode->i_block[0] =
3142				cpu_to_le32(old_encode_dev(inode->i_rdev));
3143			raw_inode->i_block[1] = 0;
3144		} else {
3145			raw_inode->i_block[0] = 0;
3146			raw_inode->i_block[1] =
3147				cpu_to_le32(new_encode_dev(inode->i_rdev));
3148			raw_inode->i_block[2] = 0;
3149		}
3150	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
3151		raw_inode->i_block[block] = ei->i_data[block];
3152
3153	if (ei->i_extra_isize)
3154		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3155
3156	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
3157	unlock_buffer(bh);
3158	rc = ext3_journal_dirty_metadata(handle, bh);
3159	if (!err)
3160		err = rc;
3161	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
3162
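	/*
	 * Remember which transaction carried this inode update, so that a
	 * later fsync can tell whether it still needs to wait for a commit.
	 */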
3163	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
3164out_brelse:
3165	brelse(bh);
3166	ext3_std_error(inode->i_sb, err);
3167	return err;
3168}
3169
3170/*
3171 * ext3_write_inode()
3172 *
3173 * We are called from a few places:
3174 *
3175 * - Within generic_file_write() for O_SYNC files.
3176 *   Here, there will be no transaction running. We wait for any running
3177 *   transaction to commit.
3178 *
3179 * - Within sys_sync(), kupdate and such.
3180 *   We wait on commit, if told to.
3181 *
3182 * - Within prune_icache() (PF_MEMALLOC == true)
3183 *   Here we simply return.  We can't afford to block kswapd on the
3184 *   journal commit.
3185 *
3186 * In all cases it is actually safe for us to return without doing anything,
3187 * because the inode has been copied into a raw inode buffer in
3188 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3189 * knfsd.
3190 *
3191 * Note that we are absolutely dependent upon all inode dirtiers doing the
3192 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3193 * which we are interested.
3194 *
3195 * It would be a bug for them to not do this.  The code:
3196 *
3197 *	mark_inode_dirty(inode)
3198 *	stuff();
3199 *	inode->i_size = expr;
3200 *
3201 * is in error because a kswapd-driven write_inode() could occur while
3202 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3203 * will no longer be on the superblock's dirty inode list.
3204 */
3205int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
3206{
3207	if (current->flags & PF_MEMALLOC)
3208		return 0;
3209
3210	if (ext3_journal_current_handle()) {
3211		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3212		dump_stack();
3213		return -EIO;
3214	}
3215
3216	if (wbc->sync_mode != WB_SYNC_ALL)
3217		return 0;
3218
3219	return ext3_force_commit(inode->i_sb);
3220}
3221
3222/*
3223 * ext3_setattr()
3224 *
3225 * Called from notify_change.
3226 *
3227 * We want to trap VFS attempts to truncate the file as soon as
3228 * possible.  In particular, we want to make sure that when the VFS
3229 * shrinks i_size, we put the inode on the orphan list and modify
3230 * i_disksize immediately, so that during the subsequent flushing of
3231 * dirty pages and freeing of disk blocks, we can guarantee that any
3232 * commit will leave the blocks being flushed in an unused state on
3233 * disk.  (On recovery, the inode will get truncated and the blocks will
3234 * be freed, so we have a strong guarantee that no future commit will
3235 * leave these blocks visible to the user.)
3236 *
3237 * Called with inode->sem down.
3238 */
3239int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3240{
3241	struct inode *inode = dentry->d_inode;
3242	int error, rc = 0;
3243	const unsigned int ia_valid = attr->ia_valid;
3244
3245	error = inode_change_ok(inode, attr);
3246	if (error)
3247		return error;
3248
3249	if (is_quota_modification(inode, attr))
3250		dquot_initialize(inode);
3251	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3252		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3253		handle_t *handle;
3254
3255		/* (user+group)*(old+new) structure, inode write (sb,
3256		 * inode block, ? - but truncate inode update has it) */
3257		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
3258					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
3259		if (IS_ERR(handle)) {
3260			error = PTR_ERR(handle);
3261			goto err_out;
3262		}
3263		error = dquot_transfer(inode, attr);
3264		if (error) {
3265			ext3_journal_stop(handle);
3266			return error;
3267		}
3268		/* Update corresponding info in inode so that everything is in
3269		 * one transaction */
3270		if (attr->ia_valid & ATTR_UID)
3271			inode->i_uid = attr->ia_uid;
3272		if (attr->ia_valid & ATTR_GID)
3273			inode->i_gid = attr->ia_gid;
3274		error = ext3_mark_inode_dirty(handle, inode);
3275		ext3_journal_stop(handle);
3276	}
3277
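	/* Wait for any outstanding direct I/O before changing the size. */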
3278	if (attr->ia_valid & ATTR_SIZE)
3279		inode_dio_wait(inode);
3280
3281	if (S_ISREG(inode->i_mode) &&
3282	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3283		handle_t *handle;
3284
3285		handle = ext3_journal_start(inode, 3);
3286		if (IS_ERR(handle)) {
3287			error = PTR_ERR(handle);
3288			goto err_out;
3289		}
3290
3291		error = ext3_orphan_add(handle, inode);
3292		if (error) {
3293			ext3_journal_stop(handle);
3294			goto err_out;
3295		}
3296		EXT3_I(inode)->i_disksize = attr->ia_size;
3297		error = ext3_mark_inode_dirty(handle, inode);
3298		ext3_journal_stop(handle);
3299		if (error) {
3300			/* Some hard fs error must have happened. Bail out. */
3301			ext3_orphan_del(NULL, inode);
3302			goto err_out;
3303		}
3304		rc = ext3_block_truncate_page(inode, attr->ia_size);
3305		if (rc) {
3306			/* Cleanup orphan list and exit */
3307			handle = ext3_journal_start(inode, 3);
3308			if (IS_ERR(handle)) {
3309				ext3_orphan_del(NULL, inode);
3310				goto err_out;
3311			}
3312			ext3_orphan_del(handle, inode);
3313			ext3_journal_stop(handle);
3314			goto err_out;
3315		}
3316	}
3317
3318	if ((attr->ia_valid & ATTR_SIZE) &&
3319	    attr->ia_size != i_size_read(inode)) {
3320		truncate_setsize(inode, attr->ia_size);
3321		ext3_truncate(inode);
3322	}
3323
3324	setattr_copy(inode, attr);
3325	mark_inode_dirty(inode);
3326
3327	if (ia_valid & ATTR_MODE)
3328		rc = ext3_acl_chmod(inode);
3329
3330err_out:
3331	ext3_std_error(inode->i_sb, error);
3332	if (!error)
3333		error = rc;
3334	return error;
3335}
3336
3337
3338/*
3339 * How many blocks doth make a writepage()?
3340 *
3341 * With N blocks per page, it may be:
3342 * N data blocks
3343 * 2 indirect blocks
3344 * 2 dindirect blocks
3345 * 1 tindirect block
3346 * N+5 bitmap blocks (from the above)
3347 * N+5 group descriptor summary blocks
3348 * 1 inode block
3349 * 1 superblock.
3350 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3351 *
3352 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3353 *
3354 * With ordered or writeback data it's the same, less the N data blocks.
3355 *
3356 * If the inode's direct blocks can hold an integral number of pages then a
3357 * page cannot straddle two indirect blocks, and we can only touch one indirect
3358 * and dindirect block, and the "5" above becomes "3".
3359 *
3360 * This still overestimates under most circumstances.  If we were to pass the
3361 * start and end offsets in here as well we could do block_to_path() on each
3362 * block and work out the exact number of indirects which are touched.  Pah.
3363 */
3364
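/*
 * Worked example (a sketch, not part of the original source): with 4K
 * blocks and 4K pages, bpp == 1 and EXT3_NDIR_BLOCKS (12) % bpp == 0, so
 * "indirects" is 3.  Journalled data then needs 3 * (1 + 3) + 2 = 14
 * credits and ordered/writeback data 2 * (1 + 3) + 3 + 2 = 13, plus the
 * quota credits added below when CONFIG_QUOTA is enabled.
 */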
3365static int ext3_writepage_trans_blocks(struct inode *inode)
3366{
3367	int bpp = ext3_journal_blocks_per_page(inode);
3368	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3369	int ret;
3370
3371	if (ext3_should_journal_data(inode))
3372		ret = 3 * (bpp + indirects) + 2;
3373	else
3374		ret = 2 * (bpp + indirects) + indirects + 2;
3375
3376#ifdef CONFIG_QUOTA
3377	/* We know that the structure was already allocated during dquot_initialize()
3378	 * so we will be updating only the data blocks + inodes */
3379	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3380#endif
3381
3382	return ret;
3383}
3384
3385/*
3386 * The caller must have previously called ext3_reserve_inode_write().
3387 * Given this, we know that the caller already has write access to iloc->bh.
3388 */
3389int ext3_mark_iloc_dirty(handle_t *handle,
3390		struct inode *inode, struct ext3_iloc *iloc)
3391{
3392	int err = 0;
3393
3394	/* the do_update_inode consumes one bh->b_count */
3395	get_bh(iloc->bh);
3396
3397	/* ext3_do_update_inode() does journal_dirty_metadata */
3398	err = ext3_do_update_inode(handle, inode, iloc);
3399	put_bh(iloc->bh);
3400	return err;
3401}
3402
3403/*
3404 * On success, we end up with an outstanding reference count against
3405 * iloc->bh.  This _must_ be cleaned up later.
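 *
 * Typical usage is roughly (a sketch, not verbatim from this file):
 *
 *	err = ext3_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the in-core inode here ...
 *		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
 *	}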
3406 */
3407
3408int
3409ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3410			 struct ext3_iloc *iloc)
3411{
3412	int err = 0;
3413	if (handle) {
3414		err = ext3_get_inode_loc(inode, iloc);
3415		if (!err) {
3416			BUFFER_TRACE(iloc->bh, "get_write_access");
3417			err = ext3_journal_get_write_access(handle, iloc->bh);
3418			if (err) {
3419				brelse(iloc->bh);
3420				iloc->bh = NULL;
3421			}
3422		}
3423	}
3424	ext3_std_error(inode->i_sb, err);
3425	return err;
3426}
3427
3428/*
3429 * What we do here is to mark the in-core inode as clean with respect to inode
3430 * dirtiness (it may still be data-dirty).
3431 * This means that the in-core inode may be reaped by prune_icache
3432 * without having to perform any I/O.  This is a very good thing,
3433 * because *any* task may call prune_icache - even ones which
3434 * have a transaction open against a different journal.
3435 *
3436 * Is this cheating?  Not really.  Sure, we haven't written the
3437 * inode out, but prune_icache isn't a user-visible syncing function.
3438 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3439 * we start and wait on commits.
3440 *
3441 * Is this efficient/effective?  Well, we're being nice to the system
3442 * by cleaning up our inodes proactively so they can be reaped
3443 * without I/O.  But we are potentially leaving up to five seconds'
3444 * worth of inodes floating about which prune_icache wants us to
3445 * write out.  One way to fix that would be to get prune_icache()
3446 * to do a write_super() to free up some memory.  It has the desired
3447 * effect.
3448 */
3449int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3450{
3451	struct ext3_iloc iloc;
3452	int err;
3453
3454	might_sleep();
3455	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
3456	err = ext3_reserve_inode_write(handle, inode, &iloc);
3457	if (!err)
3458		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3459	return err;
3460}
3461
3462/*
3463 * ext3_dirty_inode() is called from __mark_inode_dirty()
3464 *
3465 * We're really interested in the case where a file is being extended.
3466 * i_size has been changed by generic_commit_write() and we thus need
3467 * to include the updated inode in the current transaction.
3468 *
3469 * Also, dquot_alloc_space() will always dirty the inode when blocks
3470 * are allocated to the file.
3471 *
3472 * If the inode is marked synchronous, we don't honour that here - doing
3473 * so would cause a commit on atime updates, which we don't bother doing.
3474 * We handle synchronous inodes at the highest possible level.
3475 */
3476void ext3_dirty_inode(struct inode *inode, int flags)
3477{
3478	handle_t *current_handle = ext3_journal_current_handle();
3479	handle_t *handle;
3480
3481	handle = ext3_journal_start(inode, 2);
3482	if (IS_ERR(handle))
3483		goto out;
3484	if (current_handle &&
3485		current_handle->h_transaction != handle->h_transaction) {
3486		/* This task has a transaction open against a different fs */
3487		printk(KERN_EMERG "%s: transactions do not match!\n",
3488		       __func__);
3489	} else {
3490		jbd_debug(5, "marking dirty.  outer handle=%p\n",
3491				current_handle);
3492		ext3_mark_inode_dirty(handle, inode);
3493	}
3494	ext3_journal_stop(handle);
3495out:
3496	return;
3497}
3498
3499#if 0
3500/*
3501 * Bind an inode's backing buffer_head into this transaction, to prevent
3502 * it from being flushed to disk early.  Unlike
3503 * ext3_reserve_inode_write, this leaves behind no bh reference and
3504 * returns no iloc structure, so the caller needs to repeat the iloc
3505 * lookup to mark the inode dirty later.
3506 */
3507static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3508{
3509	struct ext3_iloc iloc;
3510
3511	int err = 0;
3512	if (handle) {
3513		err = ext3_get_inode_loc(inode, &iloc);
3514		if (!err) {
3515			BUFFER_TRACE(iloc.bh, "get_write_access");
3516			err = journal_get_write_access(handle, iloc.bh);
3517			if (!err)
3518				err = ext3_journal_dirty_metadata(handle,
3519								  iloc.bh);
3520			brelse(iloc.bh);
3521		}
3522	}
3523	ext3_std_error(inode->i_sb, err);
3524	return err;
3525}
3526#endif
3527
3528int ext3_change_inode_journal_flag(struct inode *inode, int val)
3529{
3530	journal_t *journal;
3531	handle_t *handle;
3532	int err;
3533
3534	/*
3535	 * We have to be very careful here: changing a data block's
3536	 * journaling status dynamically is dangerous.  If we write a
3537	 * data block to the journal, change the status and then delete
3538	 * that block, we risk forgetting to revoke the old log record
3539	 * from the journal and so a subsequent replay can corrupt data.
3540	 * So, first we make sure that the journal is empty and that
3541	 * nobody is changing anything.
3542	 */
3543
3544	journal = EXT3_JOURNAL(inode);
3545	if (is_journal_aborted(journal))
3546		return -EROFS;
3547
3548	journal_lock_updates(journal);
3549	journal_flush(journal);
3550
3551	/*
3552	 * OK, there are no updates running now, and all cached data is
3553	 * synced to disk.  We are now in a completely consistent state
3554	 * which doesn't have anything in the journal, and we know that
3555	 * no filesystem updates are running, so it is safe to modify
3556	 * the inode's in-core data-journaling state flag now.
3557	 */
3558
3559	if (val)
3560		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3561	else
3562		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3563	ext3_set_aops(inode);
3564
3565	journal_unlock_updates(journal);
3566
3567	/* Finally we can mark the inode as dirty. */
3568
3569	handle = ext3_journal_start(inode, 1);
3570	if (IS_ERR(handle))
3571		return PTR_ERR(handle);
3572
3573	err = ext3_mark_inode_dirty(handle, inode);
3574	handle->h_sync = 1;
3575	ext3_journal_stop(handle);
3576	ext3_std_error(inode->i_sb, err);
3577
3578	return err;
3579}