   1/*
   2 *  linux/fs/ext3/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  Goal-directed block allocation by Stephen Tweedie
  16 *	(sct@redhat.com), 1993, 1998
  17 *  Big-endian to little-endian byte-swapping/bitmaps by
  18 *        David S. Miller (davem@caip.rutgers.edu), 1995
  19 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  20 *	(jj@sunsite.ms.mff.cuni.cz)
  21 *
  22 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  23 */
  24
  25#include <linux/module.h>
  26#include <linux/fs.h>
  27#include <linux/time.h>
  28#include <linux/ext3_jbd.h>
  29#include <linux/jbd.h>
  30#include <linux/highuid.h>
  31#include <linux/pagemap.h>
  32#include <linux/quotaops.h>
  33#include <linux/string.h>
  34#include <linux/buffer_head.h>
  35#include <linux/writeback.h>
  36#include <linux/mpage.h>
  37#include <linux/uio.h>
  38#include <linux/bio.h>
  39#include <linux/fiemap.h>
  40#include <linux/namei.h>
  41#include <trace/events/ext3.h>
  42#include "xattr.h"
  43#include "acl.h"
  44
  45static int ext3_writepage_trans_blocks(struct inode *inode);
  46static int ext3_block_truncate_page(struct inode *inode, loff_t from);
  47
  48/*
  49 * Test whether an inode is a fast symlink.
  50 */
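/*
 * Note that i_blocks is accounted in 512-byte sectors, so an attached
 * extended-attribute block adds (blocksize >> 9) sectors to the count.
 * A fast symlink keeps its target inside the inode's i_data, so apart
 * from a possible xattr block it owns no blocks at all.
 */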
  51static int ext3_inode_is_fast_symlink(struct inode *inode)
  52{
  53	int ea_blocks = EXT3_I(inode)->i_file_acl ?
  54		(inode->i_sb->s_blocksize >> 9) : 0;
  55
  56	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
  57}
  58
  59/*
  60 * The ext3 forget function must perform a revoke if we are freeing data
  61 * which has been journaled.  Metadata (eg. indirect blocks) must be
  62 * revoked in all cases.
  63 *
  64 * "bh" may be NULL: a metadata block may have been freed from memory
  65 * but there may still be a record of it in the journal, and that record
  66 * still needs to be revoked.
  67 */
  68int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  69			struct buffer_head *bh, ext3_fsblk_t blocknr)
  70{
  71	int err;
  72
  73	might_sleep();
  74
  75	trace_ext3_forget(inode, is_metadata, blocknr);
  76	BUFFER_TRACE(bh, "enter");
  77
  78	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
  79		  "data mode %lx\n",
  80		  bh, is_metadata, inode->i_mode,
  81		  test_opt(inode->i_sb, DATA_FLAGS));
  82
  83	/* Never use the revoke function if we are doing full data
  84	 * journaling: there is no need to, and a V1 superblock won't
  85	 * support it.  Otherwise, only skip the revoke on un-journaled
  86	 * data blocks. */
  87
  88	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
  89	    (!is_metadata && !ext3_should_journal_data(inode))) {
  90		if (bh) {
  91			BUFFER_TRACE(bh, "call journal_forget");
  92			return ext3_journal_forget(handle, bh);
  93		}
  94		return 0;
  95	}
  96
  97	/*
  98	 * data!=journal && (is_metadata || should_journal_data(inode))
  99	 */
 100	BUFFER_TRACE(bh, "call ext3_journal_revoke");
 101	err = ext3_journal_revoke(handle, blocknr, bh);
 102	if (err)
 103		ext3_abort(inode->i_sb, __func__,
 104			   "error %d when attempting revoke", err);
 105	BUFFER_TRACE(bh, "exit");
 106	return err;
 107}
 108
 109/*
 110 * Work out how many blocks we need to proceed with the next chunk of a
 111 * truncate transaction.
 112 */
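/*
 * For example, with 4K blocks (s_blocksize_bits = 12) a 1MB file has
 * i_blocks = 2048 sectors, so the estimate below is 2048 >> 3 = 256 blocks,
 * clamped to [2, EXT3_MAX_TRANS_DATA] and added to
 * EXT3_DATA_TRANS_BLOCKS(sb).
 */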
 113static unsigned long blocks_for_truncate(struct inode *inode)
 114{
 115	unsigned long needed;
 116
 117	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
 118
 119	/* Give ourselves just enough room to cope with inodes in which
 120	 * i_blocks is corrupt: we've seen disk corruptions in the past
 121	 * which resulted in random data in an inode which looked enough
 122	 * like a regular file for ext3 to try to delete it.  Things
 123	 * will go a bit crazy if that happens, but at least we should
 124	 * try not to panic the whole kernel. */
 125	if (needed < 2)
 126		needed = 2;
 127
 128	/* But we need to bound the transaction so we don't overflow the
 129	 * journal. */
 130	if (needed > EXT3_MAX_TRANS_DATA)
 131		needed = EXT3_MAX_TRANS_DATA;
 132
 133	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 134}
 135
 136/*
 137 * Truncate transactions can be complex and absolutely huge.  So we need to
  138 * be able to restart the transaction at a convenient checkpoint to make
 139 * sure we don't overflow the journal.
 140 *
 141 * start_transaction gets us a new handle for a truncate transaction,
 142 * and extend_transaction tries to extend the existing one a bit.  If
 143 * extend fails, we need to propagate the failure up and restart the
 144 * transaction in the top-level truncate loop. --sct
 145 */
 146static handle_t *start_transaction(struct inode *inode)
 147{
 148	handle_t *result;
 149
 150	result = ext3_journal_start(inode, blocks_for_truncate(inode));
 151	if (!IS_ERR(result))
 152		return result;
 153
 154	ext3_std_error(inode->i_sb, PTR_ERR(result));
 155	return result;
 156}
 157
 158/*
 159 * Try to extend this transaction for the purposes of truncation.
 160 *
 161 * Returns 0 if we managed to create more room.  If we can't create more
  162 * room, the transaction must be restarted and we return 1.
 163 */
 164static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 165{
 166	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
 167		return 0;
 168	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
 169		return 0;
 170	return 1;
 171}
 172
 173/*
 174 * Restart the transaction associated with *handle.  This does a commit,
 175 * so before we call here everything must be consistently dirtied against
 176 * this transaction.
 177 */
 178static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
 179{
 180	int ret;
 181
 182	jbd_debug(2, "restarting handle %p\n", handle);
 183	/*
 184	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
 185	 * At this moment, get_block can be called only for blocks inside
 186	 * i_size since page cache has been already dropped and writes are
 187	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
 188	 */
 189	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
 190	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
 191	mutex_lock(&EXT3_I(inode)->truncate_mutex);
 192	return ret;
 193}
 194
 195/*
 196 * Called at inode eviction from icache
 197 */
 198void ext3_evict_inode (struct inode *inode)
 199{
 200	struct ext3_inode_info *ei = EXT3_I(inode);
 201	struct ext3_block_alloc_info *rsv;
 202	handle_t *handle;
 203	int want_delete = 0;
 204
 205	trace_ext3_evict_inode(inode);
 206	if (!inode->i_nlink && !is_bad_inode(inode)) {
 207		dquot_initialize(inode);
 208		want_delete = 1;
 209	}
 210
 211	/*
  212	 * When journalling data, dirty buffers are tracked only in the journal.
 213	 * So although mm thinks everything is clean and ready for reaping the
 214	 * inode might still have some pages to write in the running
 215	 * transaction or waiting to be checkpointed. Thus calling
 216	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
 217	 * these buffers can cause data loss. Also even if we did not discard
 218	 * these buffers, we would have no way to find them after the inode
 219	 * is reaped and thus user could see stale data if he tries to read
 220	 * them before the transaction is checkpointed. So be careful and
 221	 * force everything to disk here... We use ei->i_datasync_tid to
 222	 * store the newest transaction containing inode's data.
 223	 *
 224	 * Note that directories do not have this problem because they don't
 225	 * use page cache.
 226	 */
 227	if (inode->i_nlink && ext3_should_journal_data(inode) &&
 228	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
 229		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
 230		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
 231
 232		log_start_commit(journal, commit_tid);
 233		log_wait_commit(journal, commit_tid);
 234		filemap_write_and_wait(&inode->i_data);
 235	}
 236	truncate_inode_pages(&inode->i_data, 0);
 237
 238	ext3_discard_reservation(inode);
 239	rsv = ei->i_block_alloc_info;
 240	ei->i_block_alloc_info = NULL;
 241	if (unlikely(rsv))
 242		kfree(rsv);
 243
 244	if (!want_delete)
 245		goto no_delete;
 246
 247	handle = start_transaction(inode);
 248	if (IS_ERR(handle)) {
 249		/*
 250		 * If we're going to skip the normal cleanup, we still need to
 251		 * make sure that the in-core orphan linked list is properly
 252		 * cleaned up.
 253		 */
 254		ext3_orphan_del(NULL, inode);
 255		goto no_delete;
 256	}
 257
 258	if (IS_SYNC(inode))
 259		handle->h_sync = 1;
 260	inode->i_size = 0;
 261	if (inode->i_blocks)
 262		ext3_truncate(inode);
 263	/*
 264	 * Kill off the orphan record created when the inode lost the last
 265	 * link.  Note that ext3_orphan_del() has to be able to cope with the
 266	 * deletion of a non-existent orphan - ext3_truncate() could
 267	 * have removed the record.
 268	 */
 269	ext3_orphan_del(handle, inode);
 270	ei->i_dtime = get_seconds();
 271
 272	/*
 273	 * One subtle ordering requirement: if anything has gone wrong
 274	 * (transaction abort, IO errors, whatever), then we can still
 275	 * do these next steps (the fs will already have been marked as
 276	 * having errors), but we can't free the inode if the mark_dirty
 277	 * fails.
 278	 */
 279	if (ext3_mark_inode_dirty(handle, inode)) {
 280		/* If that failed, just dquot_drop() and be done with that */
 281		dquot_drop(inode);
 282		end_writeback(inode);
 283	} else {
 284		ext3_xattr_delete_inode(handle, inode);
 285		dquot_free_inode(inode);
 286		dquot_drop(inode);
 287		end_writeback(inode);
 288		ext3_free_inode(handle, inode);
 289	}
 290	ext3_journal_stop(handle);
 291	return;
 292no_delete:
 293	end_writeback(inode);
 294	dquot_drop(inode);
 295}
 296
 297typedef struct {
 298	__le32	*p;
 299	__le32	key;
 300	struct buffer_head *bh;
 301} Indirect;
 302
 303static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 304{
 305	p->key = *(p->p = v);
 306	p->bh = bh;
 307}
 308
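/*
 * Check that the keys we cached while walking the chain still match the
 * values stored at the locations they were read from, i.e. that no
 * concurrent truncate has changed the chain under us.  Returns non-zero
 * if the chain from @from to @to is still intact.
 */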
 309static int verify_chain(Indirect *from, Indirect *to)
 310{
 311	while (from <= to && from->key == *from->p)
 312		from++;
 313	return (from > to);
 314}
 315
 316/**
 317 *	ext3_block_to_path - parse the block number into array of offsets
 318 *	@inode: inode in question (we are only interested in its superblock)
 319 *	@i_block: block number to be parsed
 320 *	@offsets: array to store the offsets in
 321 *      @boundary: set this non-zero if the referred-to block is likely to be
 322 *             followed (on disk) by an indirect block.
 323 *
  324 *	To store the locations of a file's data, ext3 uses a data structure
  325 *	common to UNIX filesystems - a tree of pointers anchored in the inode,
  326 *	with data blocks at the leaves and indirect blocks in intermediate nodes.
  327 *	This function translates the block number into a path in that tree -
  328 *	the return value is the path length and @offsets[n] is the offset of the
  329 *	pointer to the (n+1)th node in the nth one. If @block is out of range
  330 *	(negative or too large), a warning is printed and zero is returned.
 331 *
 332 *	Note: function doesn't find node addresses, so no IO is needed. All
 333 *	we need to know is the capacity of indirect blocks (taken from the
 334 *	inode->i_sb).
 335 */
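/*
 * For example, with a 4K block size EXT3_ADDR_PER_BLOCK is 1024
 * (ptrs_bits = 10) and EXT3_NDIR_BLOCKS is 12, so i_block = 5000 walks:
 *   5000 - 12   = 4988        (past the direct blocks)
 *   4988 - 1024 = 3964        (past the single-indirect range)
 *   3964 < 1024 * 1024        (fits in the double-indirect range)
 * giving depth 3, offsets[] = { EXT3_DIND_BLOCK, 3964 >> 10 = 3,
 * 3964 & 1023 = 892 } and *boundary = 1024 - 1 - 892 = 131.
 */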
 336
 337/*
 338 * Portability note: the last comparison (check that we fit into triple
 339 * indirect block) is spelled differently, because otherwise on an
 340 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 341 * if our filesystem had 8Kb blocks. We might use long long, but that would
 342 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 343 * i_block would have to be negative in the very beginning, so we would not
 344 * get there at all.
 345 */
 346
 347static int ext3_block_to_path(struct inode *inode,
 348			long i_block, int offsets[4], int *boundary)
 349{
 350	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
 351	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
 352	const long direct_blocks = EXT3_NDIR_BLOCKS,
 353		indirect_blocks = ptrs,
 354		double_blocks = (1 << (ptrs_bits * 2));
 355	int n = 0;
 356	int final = 0;
 357
 358	if (i_block < 0) {
 359		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
 360	} else if (i_block < direct_blocks) {
 361		offsets[n++] = i_block;
 362		final = direct_blocks;
 363	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
 364		offsets[n++] = EXT3_IND_BLOCK;
 365		offsets[n++] = i_block;
 366		final = ptrs;
 367	} else if ((i_block -= indirect_blocks) < double_blocks) {
 368		offsets[n++] = EXT3_DIND_BLOCK;
 369		offsets[n++] = i_block >> ptrs_bits;
 370		offsets[n++] = i_block & (ptrs - 1);
 371		final = ptrs;
 372	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
 373		offsets[n++] = EXT3_TIND_BLOCK;
 374		offsets[n++] = i_block >> (ptrs_bits * 2);
 375		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
 376		offsets[n++] = i_block & (ptrs - 1);
 377		final = ptrs;
 378	} else {
 379		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
 380	}
 381	if (boundary)
 382		*boundary = final - 1 - (i_block & (ptrs - 1));
 383	return n;
 384}
 385
 386/**
 387 *	ext3_get_branch - read the chain of indirect blocks leading to data
 388 *	@inode: inode in question
 389 *	@depth: depth of the chain (1 - direct pointer, etc.)
 390 *	@offsets: offsets of pointers in inode/indirect blocks
 391 *	@chain: place to store the result
 392 *	@err: here we store the error value
 393 *
 394 *	Function fills the array of triples <key, p, bh> and returns %NULL
 395 *	if everything went OK or the pointer to the last filled triple
 396 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 397 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 398 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 399 *	number (it points into struct inode for i==0 and into the bh->b_data
 400 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 401 *	block for i>0 and NULL for i==0. In other words, it holds the block
 402 *	numbers of the chain, addresses they were taken from (and where we can
 403 *	verify that chain did not change) and buffer_heads hosting these
 404 *	numbers.
 405 *
 406 *	Function stops when it stumbles upon zero pointer (absent block)
 407 *		(pointer to last triple returned, *@err == 0)
 408 *	or when it gets an IO error reading an indirect block
 409 *		(ditto, *@err == -EIO)
 410 *	or when it notices that chain had been changed while it was reading
 411 *		(ditto, *@err == -EAGAIN)
 412 *	or when it reads all @depth-1 indirect blocks successfully and finds
 413 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 414 */
 415static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
 416				 Indirect chain[4], int *err)
 417{
 418	struct super_block *sb = inode->i_sb;
 419	Indirect *p = chain;
 420	struct buffer_head *bh;
 421
 422	*err = 0;
 423	/* i_data is not going away, no lock needed */
 424	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
 425	if (!p->key)
 426		goto no_block;
 427	while (--depth) {
 428		bh = sb_bread(sb, le32_to_cpu(p->key));
 429		if (!bh)
 430			goto failure;
 431		/* Reader: pointers */
 432		if (!verify_chain(chain, p))
 433			goto changed;
 434		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
 435		/* Reader: end */
 436		if (!p->key)
 437			goto no_block;
 438	}
 439	return NULL;
 440
 441changed:
 442	brelse(bh);
 443	*err = -EAGAIN;
 444	goto no_block;
 445failure:
 446	*err = -EIO;
 447no_block:
 448	return p;
 449}
 450
 451/**
 452 *	ext3_find_near - find a place for allocation with sufficient locality
 453 *	@inode: owner
 454 *	@ind: descriptor of indirect block.
 455 *
 456 *	This function returns the preferred place for block allocation.
 457 *	It is used when heuristic for sequential allocation fails.
 458 *	Rules are:
 459 *	  + if there is a block to the left of our position - allocate near it.
 460 *	  + if pointer will live in indirect block - allocate near that block.
 461 *	  + if pointer will live in inode - allocate in the same
 462 *	    cylinder group.
 463 *
  464 * In the latter case we colour the starting block by the caller's PID to
  465 * prevent it from clashing with concurrent allocations for a different inode
  466 * in the same block group.  The PID is used here so that functionally related
  467 * files will be close by on disk.
 468 *
 469 *	Caller must make sure that @ind is valid and will stay that way.
 470 */
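/*
 * For example, with the common 32768 blocks per group (4K block size) a
 * process with pid 4242 gets colour (4242 % 16) * (32768 / 16) =
 * 2 * 2048 = 4096, so its allocations start 4096 blocks into the group.
 */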
 471static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
 472{
 473	struct ext3_inode_info *ei = EXT3_I(inode);
 474	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
 475	__le32 *p;
 476	ext3_fsblk_t bg_start;
 477	ext3_grpblk_t colour;
 478
 479	/* Try to find previous block */
 480	for (p = ind->p - 1; p >= start; p--) {
 481		if (*p)
 482			return le32_to_cpu(*p);
 483	}
 484
 485	/* No such thing, so let's try location of indirect block */
 486	if (ind->bh)
 487		return ind->bh->b_blocknr;
 488
 489	/*
 490	 * It is going to be referred to from the inode itself? OK, just put it
 491	 * into the same cylinder group then.
 492	 */
 493	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
 494	colour = (current->pid % 16) *
 495			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
 496	return bg_start + colour;
 497}
 498
 499/**
 500 *	ext3_find_goal - find a preferred place for allocation.
 501 *	@inode: owner
 502 *	@block:  block we want
 503 *	@partial: pointer to the last triple within a chain
 504 *
  505 *	Normally this function finds the preferred place for block allocation
  506 *	and returns it.
 507 */
 508
 509static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 510				   Indirect *partial)
 511{
 512	struct ext3_block_alloc_info *block_i;
 513
 514	block_i =  EXT3_I(inode)->i_block_alloc_info;
 515
 516	/*
 517	 * try the heuristic for sequential allocation,
 518	 * failing that at least try to get decent locality.
 519	 */
 520	if (block_i && (block == block_i->last_alloc_logical_block + 1)
 521		&& (block_i->last_alloc_physical_block != 0)) {
 522		return block_i->last_alloc_physical_block + 1;
 523	}
 524
 525	return ext3_find_near(inode, partial);
 526}
 527
 528/**
 529 *	ext3_blks_to_allocate - Look up the block map and count the number
  530 *	of direct blocks that need to be allocated for the given branch.
 531 *
 532 *	@branch: chain of indirect blocks
  533 *	@k: number of blocks needed for indirect blocks
 534 *	@blks: number of data blocks to be mapped.
 535 *	@blocks_to_boundary:  the offset in the indirect block
 536 *
  537 *	return the total number of blocks to be allocated, including the
 538 *	direct and indirect blocks.
 539 */
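/*
 * For example, with k == 0, blks == 8 and blocks_to_boundary == 5 the loop
 * below counts the target block plus the following unallocated slots in the
 * existing indirect block, stopping at the boundary - at most 6 blocks here.
 * With k > 0 the missing indirect block(s) imply nothing on the path is
 * allocated yet, so we simply take min(blks, blocks_to_boundary + 1).
 */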
 540static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 541		int blocks_to_boundary)
 542{
 543	unsigned long count = 0;
 544
 545	/*
  546	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
  547	 * so it's clear that the blocks on that path have not been allocated either
 548	 */
 549	if (k > 0) {
 550		/* right now we don't handle cross boundary allocation */
 551		if (blks < blocks_to_boundary + 1)
 552			count += blks;
 553		else
 554			count += blocks_to_boundary + 1;
 555		return count;
 556	}
 557
 558	count++;
 559	while (count < blks && count <= blocks_to_boundary &&
 560		le32_to_cpu(*(branch[0].p + count)) == 0) {
 561		count++;
 562	}
 563	return count;
 564}
 565
 566/**
 567 *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
 568 *	@handle: handle for this transaction
 569 *	@inode: owner
 570 *	@goal: preferred place for allocation
  571 *	@indirect_blks: the number of blocks needed to allocate for indirect
  572 *			blocks
  573 *	@blks:	number of blocks needed to allocate for direct blocks
 574 *	@new_blocks: on return it will store the new block numbers for
 575 *	the indirect blocks(if needed) and the first direct block,
 576 *	@err: here we store the error value
 577 *
 578 *	return the number of direct blocks allocated
 579 */
 580static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 581			ext3_fsblk_t goal, int indirect_blks, int blks,
 582			ext3_fsblk_t new_blocks[4], int *err)
 583{
 584	int target, i;
 585	unsigned long count = 0;
 586	int index = 0;
 587	ext3_fsblk_t current_block = 0;
 588	int ret = 0;
 589
 590	/*
 591	 * Here we try to allocate the requested multiple blocks at once,
 592	 * on a best-effort basis.
 593	 * To build a branch, we should allocate blocks for
  594	 * the indirect blocks (if not allocated yet), and at least
  595	 * the first direct block of this branch.  That's the
  596	 * minimum number of blocks we need to allocate (required)
 597	 */
 598	target = blks + indirect_blks;
 599
 600	while (1) {
 601		count = target;
 602		/* allocating blocks for indirect blocks and direct blocks */
 603		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
 604		if (*err)
 605			goto failed_out;
 606
 607		target -= count;
 608		/* allocate blocks for indirect blocks */
 609		while (index < indirect_blks && count) {
 610			new_blocks[index++] = current_block++;
 611			count--;
 612		}
 613
 614		if (count > 0)
 615			break;
 616	}
 617
 618	/* save the new block number for the first direct block */
 619	new_blocks[index] = current_block;
 620
 621	/* total number of blocks allocated for direct blocks */
 622	ret = count;
 623	*err = 0;
 624	return ret;
 625failed_out:
 626	for (i = 0; i <index; i++)
 627		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 628	return ret;
 629}
 630
 631/**
 632 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 633 *	@handle: handle for this transaction
 634 *	@inode: owner
 635 *	@indirect_blks: number of allocated indirect blocks
 636 *	@blks: number of allocated direct blocks
 637 *	@goal: preferred place for allocation
 638 *	@offsets: offsets (in the blocks) to store the pointers to next.
 639 *	@branch: place to store the chain in.
 640 *
 641 *	This function allocates blocks, zeroes out all but the last one,
 642 *	links them into chain and (if we are synchronous) writes them to disk.
 643 *	In other words, it prepares a branch that can be spliced onto the
 644 *	inode. It stores the information about that chain in the branch[], in
 645 *	the same format as ext3_get_branch() would do. We are calling it after
 646 *	we had read the existing part of chain and partial points to the last
 647 *	triple of that (one with zero ->key). Upon the exit we have the same
 648 *	picture as after the successful ext3_get_block(), except that in one
 649 *	place chain is disconnected - *branch->p is still zero (we did not
 650 *	set the last link), but branch->key contains the number that should
 651 *	be placed into *branch->p to fill that gap.
 652 *
 653 *	If allocation fails we free all blocks we've allocated (and forget
  654 *	their buffer_heads) and return the error value from the failed
 655 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 656 *	as described above and return 0.
 657 */
 658static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 659			int indirect_blks, int *blks, ext3_fsblk_t goal,
 660			int *offsets, Indirect *branch)
 661{
 662	int blocksize = inode->i_sb->s_blocksize;
 663	int i, n = 0;
 664	int err = 0;
 665	struct buffer_head *bh;
 666	int num;
 667	ext3_fsblk_t new_blocks[4];
 668	ext3_fsblk_t current_block;
 669
 670	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
 671				*blks, new_blocks, &err);
 672	if (err)
 673		return err;
 674
 675	branch[0].key = cpu_to_le32(new_blocks[0]);
 676	/*
 677	 * metadata blocks and data blocks are allocated.
 678	 */
 679	for (n = 1; n <= indirect_blks;  n++) {
 680		/*
 681		 * Get buffer_head for parent block, zero it out
 682		 * and set the pointer to new one, then send
 683		 * parent to disk.
 684		 */
 685		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
 686		branch[n].bh = bh;
 687		lock_buffer(bh);
 688		BUFFER_TRACE(bh, "call get_create_access");
 689		err = ext3_journal_get_create_access(handle, bh);
 690		if (err) {
 691			unlock_buffer(bh);
 692			brelse(bh);
 693			goto failed;
 694		}
 695
 696		memset(bh->b_data, 0, blocksize);
 697		branch[n].p = (__le32 *) bh->b_data + offsets[n];
 698		branch[n].key = cpu_to_le32(new_blocks[n]);
 699		*branch[n].p = branch[n].key;
 700		if ( n == indirect_blks) {
 701			current_block = new_blocks[n];
 702			/*
 703			 * End of chain, update the last new metablock of
  704			 * the chain to point to the newly allocated
  705			 * data block numbers
 706			 */
 707			for (i=1; i < num; i++)
 708				*(branch[n].p + i) = cpu_to_le32(++current_block);
 709		}
 710		BUFFER_TRACE(bh, "marking uptodate");
 711		set_buffer_uptodate(bh);
 712		unlock_buffer(bh);
 713
 714		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
 715		err = ext3_journal_dirty_metadata(handle, bh);
 716		if (err)
 717			goto failed;
 718	}
 719	*blks = num;
 720	return err;
 721failed:
 722	/* Allocation failed, free what we already allocated */
 723	for (i = 1; i <= n ; i++) {
 724		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 725		ext3_journal_forget(handle, branch[i].bh);
 726	}
 727	for (i = 0; i <indirect_blks; i++)
 728		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 729
 730	ext3_free_blocks(handle, inode, new_blocks[i], num);
 731
 732	return err;
 733}
 734
 735/**
 736 * ext3_splice_branch - splice the allocated branch onto inode.
 737 * @handle: handle for this transaction
 738 * @inode: owner
 739 * @block: (logical) number of block we are adding
 740 * @where: location of missing link
 741 * @num:   number of indirect blocks we are adding
 742 * @blks:  number of direct blocks we are adding
 743 *
 744 * This function fills the missing link and does all housekeeping needed in
 745 * inode (->i_blocks, etc.). In case of success we end up with the full
 746 * chain to new block and return 0.
 747 */
 748static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 749			long block, Indirect *where, int num, int blks)
 750{
 751	int i;
 752	int err = 0;
 753	struct ext3_block_alloc_info *block_i;
 754	ext3_fsblk_t current_block;
 755	struct ext3_inode_info *ei = EXT3_I(inode);
 756
 757	block_i = ei->i_block_alloc_info;
 758	/*
 759	 * If we're splicing into a [td]indirect block (as opposed to the
 760	 * inode) then we need to get write access to the [td]indirect block
 761	 * before the splice.
 762	 */
 763	if (where->bh) {
 764		BUFFER_TRACE(where->bh, "get_write_access");
 765		err = ext3_journal_get_write_access(handle, where->bh);
 766		if (err)
 767			goto err_out;
 768	}
 769	/* That's it */
 770
 771	*where->p = where->key;
 772
 773	/*
  774	 * Update the host buffer_head or inode to point to the rest of the
  775	 * just-allocated direct blocks
 776	 */
 777	if (num == 0 && blks > 1) {
 778		current_block = le32_to_cpu(where->key) + 1;
 779		for (i = 1; i < blks; i++)
 780			*(where->p + i ) = cpu_to_le32(current_block++);
 781	}
 782
 783	/*
 784	 * update the most recently allocated logical & physical block
  785	 * in i_block_alloc_info, to help find the proper goal block for the next
 786	 * allocation
 787	 */
 788	if (block_i) {
 789		block_i->last_alloc_logical_block = block + blks - 1;
 790		block_i->last_alloc_physical_block =
 791				le32_to_cpu(where[num].key) + blks - 1;
 792	}
 793
 794	/* We are done with atomic stuff, now do the rest of housekeeping */
 795
 796	inode->i_ctime = CURRENT_TIME_SEC;
 797	ext3_mark_inode_dirty(handle, inode);
 798	/* ext3_mark_inode_dirty already updated i_sync_tid */
 799	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 800
 801	/* had we spliced it onto indirect block? */
 802	if (where->bh) {
 803		/*
 804		 * If we spliced it onto an indirect block, we haven't
 805		 * altered the inode.  Note however that if it is being spliced
 806		 * onto an indirect block at the very end of the file (the
 807		 * file is growing) then we *will* alter the inode to reflect
 808		 * the new i_size.  But that is not done here - it is done in
 809		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
 810		 */
 811		jbd_debug(5, "splicing indirect only\n");
 812		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
 813		err = ext3_journal_dirty_metadata(handle, where->bh);
 814		if (err)
 815			goto err_out;
 816	} else {
 817		/*
 818		 * OK, we spliced it into the inode itself on a direct block.
 819		 * Inode was dirtied above.
 820		 */
 821		jbd_debug(5, "splicing direct\n");
 822	}
 823	return err;
 824
 825err_out:
 826	for (i = 1; i <= num; i++) {
 827		BUFFER_TRACE(where[i].bh, "call journal_forget");
 828		ext3_journal_forget(handle, where[i].bh);
 829		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
 830	}
 831	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
 832
 833	return err;
 834}
 835
 836/*
 837 * Allocation strategy is simple: if we have to allocate something, we will
 838 * have to go the whole way to leaf. So let's do it before attaching anything
 839 * to tree, set linkage between the newborn blocks, write them if sync is
 840 * required, recheck the path, free and repeat if check fails, otherwise
 841 * set the last missing link (that will protect us from any truncate-generated
 842 * removals - all blocks on the path are immune now) and possibly force the
 843 * write on the parent block.
 844 * That has a nice additional property: no special recovery from the failed
 845 * allocations is needed - we simply release blocks and do not touch anything
 846 * reachable from inode.
 847 *
 848 * `handle' can be NULL if create == 0.
 849 *
 850 * The BKL may not be held on entry here.  Be sure to take it early.
 851 * return > 0, # of blocks mapped or allocated.
 852 * return = 0, if plain lookup failed.
 853 * return < 0, error case.
 854 */
 855int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 856		sector_t iblock, unsigned long maxblocks,
 857		struct buffer_head *bh_result,
 858		int create)
 859{
 860	int err = -EIO;
 861	int offsets[4];
 862	Indirect chain[4];
 863	Indirect *partial;
 864	ext3_fsblk_t goal;
 865	int indirect_blks;
 866	int blocks_to_boundary = 0;
 867	int depth;
 868	struct ext3_inode_info *ei = EXT3_I(inode);
 869	int count = 0;
 870	ext3_fsblk_t first_block = 0;
 871
 872
 873	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
 874	J_ASSERT(handle != NULL || create == 0);
 875	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 876
 877	if (depth == 0)
 878		goto out;
 879
 880	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 881
 882	/* Simplest case - block found, no allocation needed */
 883	if (!partial) {
 884		first_block = le32_to_cpu(chain[depth - 1].key);
 885		clear_buffer_new(bh_result);
 886		count++;
 887		/*map more blocks*/
 888		while (count < maxblocks && count <= blocks_to_boundary) {
 889			ext3_fsblk_t blk;
 890
 891			if (!verify_chain(chain, chain + depth - 1)) {
 892				/*
 893				 * Indirect block might be removed by
 894				 * truncate while we were reading it.
 895				 * Handling of that case: forget what we've
 896				 * got now. Flag the err as EAGAIN, so it
 897				 * will reread.
 898				 */
 899				err = -EAGAIN;
 900				count = 0;
 901				break;
 902			}
 903			blk = le32_to_cpu(*(chain[depth-1].p + count));
 904
 905			if (blk == first_block + count)
 906				count++;
 907			else
 908				break;
 909		}
 910		if (err != -EAGAIN)
 911			goto got_it;
 912	}
 913
 914	/* Next simple case - plain lookup or failed read of indirect block */
 915	if (!create || err == -EIO)
 916		goto cleanup;
 917
 918	/*
 919	 * Block out ext3_truncate while we alter the tree
 920	 */
 921	mutex_lock(&ei->truncate_mutex);
 922
 923	/*
 924	 * If the indirect block is missing while we are reading
  925	 * the chain (ext3_get_branch() returns -EAGAIN), or
  926	 * if the chain has been changed after we grabbed the semaphore
  927	 * (either because another process truncated this branch, or
  928	 * another get_block allocated this branch), re-grab the chain to see if
  929	 * the requested block has been allocated or not.
 930	 *
 931	 * Since we already block the truncate/other get_block
 932	 * at this point, we will have the current copy of the chain when we
 933	 * splice the branch into the tree.
 934	 */
 935	if (err == -EAGAIN || !verify_chain(chain, partial)) {
 936		while (partial > chain) {
 937			brelse(partial->bh);
 938			partial--;
 939		}
 940		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 941		if (!partial) {
 942			count++;
 943			mutex_unlock(&ei->truncate_mutex);
 944			if (err)
 945				goto cleanup;
 946			clear_buffer_new(bh_result);
 947			goto got_it;
 948		}
 949	}
 950
 951	/*
 952	 * Okay, we need to do block allocation.  Lazily initialize the block
 953	 * allocation info here if necessary
 954	*/
 955	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
 956		ext3_init_block_alloc_info(inode);
 957
 958	goal = ext3_find_goal(inode, iblock, partial);
 959
 960	/* the number of blocks need to allocate for [d,t]indirect blocks */
 961	indirect_blks = (chain + depth) - partial - 1;
 962
 963	/*
  964	 * Next look up the indirect map to count the total number of
 965	 * direct blocks to allocate for this branch.
 966	 */
 967	count = ext3_blks_to_allocate(partial, indirect_blks,
 968					maxblocks, blocks_to_boundary);
 969	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 970				offsets + (partial - chain), partial);
 971
 972	/*
 973	 * The ext3_splice_branch call will free and forget any buffers
 974	 * on the new chain if there is a failure, but that risks using
 975	 * up transaction credits, especially for bitmaps where the
 976	 * credits cannot be returned.  Can we handle this somehow?  We
 977	 * may need to return -EAGAIN upwards in the worst case.  --sct
 978	 */
 979	if (!err)
 980		err = ext3_splice_branch(handle, inode, iblock,
 981					partial, indirect_blks, count);
 982	mutex_unlock(&ei->truncate_mutex);
 983	if (err)
 984		goto cleanup;
 985
 986	set_buffer_new(bh_result);
 987got_it:
 988	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 989	if (count > blocks_to_boundary)
 990		set_buffer_boundary(bh_result);
 991	err = count;
 992	/* Clean up and exit */
 993	partial = chain + depth - 1;	/* the whole chain */
 994cleanup:
 995	while (partial > chain) {
 996		BUFFER_TRACE(partial->bh, "call brelse");
 997		brelse(partial->bh);
 998		partial--;
 999	}
1000	BUFFER_TRACE(bh_result, "returned");
1001out:
1002	trace_ext3_get_blocks_exit(inode, iblock,
1003				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
1004				   count, err);
1005	return err;
1006}
1007
1008/* Maximum number of blocks we map for direct IO at once. */
1009#define DIO_MAX_BLOCKS 4096
1010/*
1011 * Number of credits we need for writing DIO_MAX_BLOCKS:
1012 * We need sb + group descriptor + bitmap + inode -> 4
1013 * For B blocks with A block pointers per block we need:
1014 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
1015 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
1016 */
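/*
 * Worked out: 4 + 1 + (4096/256/256 + 2) + (4096/256 + 2)
 *           = 4 + 1 + 2 + 18 = 25
 */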
1017#define DIO_CREDITS 25
1018
1019static int ext3_get_block(struct inode *inode, sector_t iblock,
1020			struct buffer_head *bh_result, int create)
1021{
1022	handle_t *handle = ext3_journal_current_handle();
1023	int ret = 0, started = 0;
1024	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1025
1026	if (create && !handle) {	/* Direct IO write... */
1027		if (max_blocks > DIO_MAX_BLOCKS)
1028			max_blocks = DIO_MAX_BLOCKS;
1029		handle = ext3_journal_start(inode, DIO_CREDITS +
1030				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
1031		if (IS_ERR(handle)) {
1032			ret = PTR_ERR(handle);
1033			goto out;
1034		}
1035		started = 1;
1036	}
1037
1038	ret = ext3_get_blocks_handle(handle, inode, iblock,
1039					max_blocks, bh_result, create);
1040	if (ret > 0) {
1041		bh_result->b_size = (ret << inode->i_blkbits);
1042		ret = 0;
1043	}
1044	if (started)
1045		ext3_journal_stop(handle);
1046out:
1047	return ret;
1048}
1049
1050int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1051		u64 start, u64 len)
1052{
1053	return generic_block_fiemap(inode, fieinfo, start, len,
1054				    ext3_get_block);
1055}
1056
1057/*
1058 * `handle' can be NULL if create is zero
1059 */
1060struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
1061				long block, int create, int *errp)
1062{
1063	struct buffer_head dummy;
1064	int fatal = 0, err;
1065
1066	J_ASSERT(handle != NULL || create == 0);
1067
1068	dummy.b_state = 0;
1069	dummy.b_blocknr = -1000;
1070	buffer_trace_init(&dummy.b_history);
1071	err = ext3_get_blocks_handle(handle, inode, block, 1,
1072					&dummy, create);
1073	/*
1074	 * ext3_get_blocks_handle() returns number of blocks
1075	 * mapped. 0 in case of a HOLE.
1076	 */
1077	if (err > 0) {
1078		if (err > 1)
1079			WARN_ON(1);
1080		err = 0;
1081	}
1082	*errp = err;
1083	if (!err && buffer_mapped(&dummy)) {
1084		struct buffer_head *bh;
1085		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1086		if (!bh) {
1087			*errp = -EIO;
1088			goto err;
1089		}
1090		if (buffer_new(&dummy)) {
1091			J_ASSERT(create != 0);
1092			J_ASSERT(handle != NULL);
1093
1094			/*
1095			 * Now that we do not always journal data, we should
1096			 * keep in mind whether this should always journal the
1097			 * new buffer as metadata.  For now, regular file
1098			 * writes use ext3_get_block instead, so it's not a
1099			 * problem.
1100			 */
1101			lock_buffer(bh);
1102			BUFFER_TRACE(bh, "call get_create_access");
1103			fatal = ext3_journal_get_create_access(handle, bh);
1104			if (!fatal && !buffer_uptodate(bh)) {
1105				memset(bh->b_data,0,inode->i_sb->s_blocksize);
1106				set_buffer_uptodate(bh);
1107			}
1108			unlock_buffer(bh);
1109			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1110			err = ext3_journal_dirty_metadata(handle, bh);
1111			if (!fatal)
1112				fatal = err;
1113		} else {
1114			BUFFER_TRACE(bh, "not a new buffer");
1115		}
1116		if (fatal) {
1117			*errp = fatal;
1118			brelse(bh);
1119			bh = NULL;
1120		}
1121		return bh;
1122	}
1123err:
1124	return NULL;
1125}
1126
1127struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1128			       int block, int create, int *err)
1129{
1130	struct buffer_head * bh;
1131
1132	bh = ext3_getblk(handle, inode, block, create, err);
1133	if (!bh)
1134		return bh;
1135	if (buffer_uptodate(bh))
1136		return bh;
1137	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
1138	wait_on_buffer(bh);
1139	if (buffer_uptodate(bh))
1140		return bh;
1141	put_bh(bh);
1142	*err = -EIO;
1143	return NULL;
1144}
1145
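/*
 * Call @fn on every buffer of the page that overlaps the byte range
 * [@from, @to).  Buffers entirely outside the range are skipped, and
 * *@partial is set if any skipped buffer is not uptodate.  The walk stops
 * at the first buffer for which @fn fails and returns that error.
 */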
1146static int walk_page_buffers(	handle_t *handle,
1147				struct buffer_head *head,
1148				unsigned from,
1149				unsigned to,
1150				int *partial,
1151				int (*fn)(	handle_t *handle,
1152						struct buffer_head *bh))
1153{
1154	struct buffer_head *bh;
1155	unsigned block_start, block_end;
1156	unsigned blocksize = head->b_size;
1157	int err, ret = 0;
1158	struct buffer_head *next;
1159
1160	for (	bh = head, block_start = 0;
1161		ret == 0 && (bh != head || !block_start);
1162		block_start = block_end, bh = next)
1163	{
1164		next = bh->b_this_page;
1165		block_end = block_start + blocksize;
1166		if (block_end <= from || block_start >= to) {
1167			if (partial && !buffer_uptodate(bh))
1168				*partial = 1;
1169			continue;
1170		}
1171		err = (*fn)(handle, bh);
1172		if (!ret)
1173			ret = err;
1174	}
1175	return ret;
1176}
1177
1178/*
1179 * To preserve ordering, it is essential that the hole instantiation and
1180 * the data write be encapsulated in a single transaction.  We cannot
1181 * close off a transaction and start a new one between the ext3_get_block()
1182 * and the commit_write().  So doing the journal_start at the start of
1183 * prepare_write() is the right place.
1184 *
1185 * Also, this function can nest inside ext3_writepage() ->
1186 * block_write_full_page(). In that case, we *know* that ext3_writepage()
1187 * has generated enough buffer credits to do the whole page.  So we won't
1188 * block on the journal in that case, which is good, because the caller may
1189 * be PF_MEMALLOC.
1190 *
1191 * By accident, ext3 can be reentered when a transaction is open via
1192 * quota file writes.  If we were to commit the transaction while thus
1193 * reentered, there can be a deadlock - we would be holding a quota
1194 * lock, and the commit would never complete if another thread had a
1195 * transaction open and was blocking on the quota lock - a ranking
1196 * violation.
1197 *
1198 * So what we do is to rely on the fact that journal_stop/journal_start
1199 * will _not_ run commit under these circumstances because handle->h_ref
1200 * is elevated.  We'll still have enough credits for the tiny quotafile
1201 * write.
1202 */
1203static int do_journal_get_write_access(handle_t *handle,
1204					struct buffer_head *bh)
1205{
1206	int dirty = buffer_dirty(bh);
1207	int ret;
1208
1209	if (!buffer_mapped(bh) || buffer_freed(bh))
1210		return 0;
1211	/*
1212	 * __block_prepare_write() could have dirtied some buffers. Clean
1213	 * the dirty bit as jbd2_journal_get_write_access() could complain
1214	 * otherwise about fs integrity issues. Setting of the dirty bit
1215	 * by __block_prepare_write() isn't a real problem here as we clear
1216	 * the bit before releasing a page lock and thus writeback cannot
1217	 * ever write the buffer.
1218	 */
1219	if (dirty)
1220		clear_buffer_dirty(bh);
1221	ret = ext3_journal_get_write_access(handle, bh);
1222	if (!ret && dirty)
1223		ret = ext3_journal_dirty_metadata(handle, bh);
1224	return ret;
1225}
1226
1227/*
1228 * Truncate blocks that were not used by write. We have to truncate the
1229 * pagecache as well so that corresponding buffers get properly unmapped.
1230 */
1231static void ext3_truncate_failed_write(struct inode *inode)
1232{
1233	truncate_inode_pages(inode->i_mapping, inode->i_size);
1234	ext3_truncate(inode);
1235}
1236
1237/*
1238 * Truncate blocks that were not used by direct IO write. We have to zero out
1239 * the last file block as well because direct IO might have written to it.
1240 */
1241static void ext3_truncate_failed_direct_write(struct inode *inode)
1242{
1243	ext3_block_truncate_page(inode, inode->i_size);
1244	ext3_truncate(inode);
1245}
1246
1247static int ext3_write_begin(struct file *file, struct address_space *mapping,
1248				loff_t pos, unsigned len, unsigned flags,
1249				struct page **pagep, void **fsdata)
1250{
1251	struct inode *inode = mapping->host;
1252	int ret;
1253	handle_t *handle;
1254	int retries = 0;
1255	struct page *page;
1256	pgoff_t index;
1257	unsigned from, to;
1258	/* Reserve one block more for addition to orphan list in case
1259	 * we allocate blocks but write fails for some reason */
1260	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
1261
1262	trace_ext3_write_begin(inode, pos, len, flags);
1263
1264	index = pos >> PAGE_CACHE_SHIFT;
1265	from = pos & (PAGE_CACHE_SIZE - 1);
1266	to = from + len;
1267
1268retry:
1269	page = grab_cache_page_write_begin(mapping, index, flags);
1270	if (!page)
1271		return -ENOMEM;
1272	*pagep = page;
1273
1274	handle = ext3_journal_start(inode, needed_blocks);
1275	if (IS_ERR(handle)) {
1276		unlock_page(page);
1277		page_cache_release(page);
1278		ret = PTR_ERR(handle);
1279		goto out;
1280	}
1281	ret = __block_write_begin(page, pos, len, ext3_get_block);
1282	if (ret)
1283		goto write_begin_failed;
1284
1285	if (ext3_should_journal_data(inode)) {
1286		ret = walk_page_buffers(handle, page_buffers(page),
1287				from, to, NULL, do_journal_get_write_access);
1288	}
1289write_begin_failed:
1290	if (ret) {
1291		/*
1292		 * block_write_begin may have instantiated a few blocks
1293		 * outside i_size.  Trim these off again. Don't need
1294		 * i_size_read because we hold i_mutex.
1295		 *
1296		 * Add inode to orphan list in case we crash before truncate
1297		 * finishes. Do this only if ext3_can_truncate() agrees so
1298		 * that orphan processing code is happy.
1299		 */
1300		if (pos + len > inode->i_size && ext3_can_truncate(inode))
1301			ext3_orphan_add(handle, inode);
1302		ext3_journal_stop(handle);
1303		unlock_page(page);
1304		page_cache_release(page);
1305		if (pos + len > inode->i_size)
1306			ext3_truncate_failed_write(inode);
1307	}
1308	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1309		goto retry;
1310out:
1311	return ret;
1312}
1313
1314
1315int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1316{
1317	int err = journal_dirty_data(handle, bh);
1318	if (err)
1319		ext3_journal_abort_handle(__func__, __func__,
1320						bh, handle, err);
1321	return err;
1322}
1323
1324/* For ordered writepage and write_end functions */
1325static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1326{
1327	/*
1328	 * Write could have mapped the buffer but it didn't copy the data in
1329	 * yet. So avoid filing such buffer into a transaction.
1330	 */
1331	if (buffer_mapped(bh) && buffer_uptodate(bh))
1332		return ext3_journal_dirty_data(handle, bh);
1333	return 0;
1334}
1335
1336/* For write_end() in data=journal mode */
1337static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1338{
1339	if (!buffer_mapped(bh) || buffer_freed(bh))
1340		return 0;
1341	set_buffer_uptodate(bh);
1342	return ext3_journal_dirty_metadata(handle, bh);
1343}
1344
1345/*
1346 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
1347 * for the whole page but later we failed to copy the data in. Update inode
1348 * size according to what we managed to copy. The rest is going to be
1349 * truncated in write_end function.
1350 */
1351static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1352{
1353	/* What matters to us is i_disksize. We don't write i_size anywhere */
1354	if (pos + copied > inode->i_size)
1355		i_size_write(inode, pos + copied);
1356	if (pos + copied > EXT3_I(inode)->i_disksize) {
1357		EXT3_I(inode)->i_disksize = pos + copied;
1358		mark_inode_dirty(inode);
1359	}
1360}
1361
1362/*
 1363 * We need to pick up the new inode size which generic_commit_write gave us.
 1364 * `file' can be NULL - eg, when called from page_symlink().
 1365 *
 1366 * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
 1367 * buffers are managed internally.
1368 */
1369static int ext3_ordered_write_end(struct file *file,
1370				struct address_space *mapping,
1371				loff_t pos, unsigned len, unsigned copied,
1372				struct page *page, void *fsdata)
1373{
1374	handle_t *handle = ext3_journal_current_handle();
1375	struct inode *inode = file->f_mapping->host;
1376	unsigned from, to;
1377	int ret = 0, ret2;
1378
1379	trace_ext3_ordered_write_end(inode, pos, len, copied);
1380	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1381
1382	from = pos & (PAGE_CACHE_SIZE - 1);
1383	to = from + copied;
1384	ret = walk_page_buffers(handle, page_buffers(page),
1385		from, to, NULL, journal_dirty_data_fn);
1386
1387	if (ret == 0)
1388		update_file_sizes(inode, pos, copied);
1389	/*
1390	 * There may be allocated blocks outside of i_size because
1391	 * we failed to copy some data. Prepare for truncate.
1392	 */
1393	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1394		ext3_orphan_add(handle, inode);
1395	ret2 = ext3_journal_stop(handle);
1396	if (!ret)
1397		ret = ret2;
1398	unlock_page(page);
1399	page_cache_release(page);
1400
1401	if (pos + len > inode->i_size)
1402		ext3_truncate_failed_write(inode);
1403	return ret ? ret : copied;
1404}
1405
1406static int ext3_writeback_write_end(struct file *file,
1407				struct address_space *mapping,
1408				loff_t pos, unsigned len, unsigned copied,
1409				struct page *page, void *fsdata)
1410{
1411	handle_t *handle = ext3_journal_current_handle();
1412	struct inode *inode = file->f_mapping->host;
1413	int ret;
1414
1415	trace_ext3_writeback_write_end(inode, pos, len, copied);
1416	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1417	update_file_sizes(inode, pos, copied);
1418	/*
1419	 * There may be allocated blocks outside of i_size because
1420	 * we failed to copy some data. Prepare for truncate.
1421	 */
1422	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1423		ext3_orphan_add(handle, inode);
1424	ret = ext3_journal_stop(handle);
1425	unlock_page(page);
1426	page_cache_release(page);
1427
1428	if (pos + len > inode->i_size)
1429		ext3_truncate_failed_write(inode);
1430	return ret ? ret : copied;
1431}
1432
1433static int ext3_journalled_write_end(struct file *file,
1434				struct address_space *mapping,
1435				loff_t pos, unsigned len, unsigned copied,
1436				struct page *page, void *fsdata)
1437{
1438	handle_t *handle = ext3_journal_current_handle();
1439	struct inode *inode = mapping->host;
1440	struct ext3_inode_info *ei = EXT3_I(inode);
1441	int ret = 0, ret2;
1442	int partial = 0;
1443	unsigned from, to;
1444
1445	trace_ext3_journalled_write_end(inode, pos, len, copied);
1446	from = pos & (PAGE_CACHE_SIZE - 1);
1447	to = from + len;
1448
1449	if (copied < len) {
1450		if (!PageUptodate(page))
1451			copied = 0;
1452		page_zero_new_buffers(page, from + copied, to);
1453		to = from + copied;
1454	}
1455
1456	ret = walk_page_buffers(handle, page_buffers(page), from,
1457				to, &partial, write_end_fn);
1458	if (!partial)
1459		SetPageUptodate(page);
1460
1461	if (pos + copied > inode->i_size)
1462		i_size_write(inode, pos + copied);
1463	/*
1464	 * There may be allocated blocks outside of i_size because
1465	 * we failed to copy some data. Prepare for truncate.
1466	 */
1467	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1468		ext3_orphan_add(handle, inode);
1469	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1470	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
1471	if (inode->i_size > ei->i_disksize) {
1472		ei->i_disksize = inode->i_size;
1473		ret2 = ext3_mark_inode_dirty(handle, inode);
1474		if (!ret)
1475			ret = ret2;
1476	}
1477
1478	ret2 = ext3_journal_stop(handle);
1479	if (!ret)
1480		ret = ret2;
1481	unlock_page(page);
1482	page_cache_release(page);
1483
1484	if (pos + len > inode->i_size)
1485		ext3_truncate_failed_write(inode);
1486	return ret ? ret : copied;
1487}
1488
1489/*
1490 * bmap() is special.  It gets used by applications such as lilo and by
1491 * the swapper to find the on-disk block of a specific piece of data.
1492 *
1493 * Naturally, this is dangerous if the block concerned is still in the
1494 * journal.  If somebody makes a swapfile on an ext3 data-journaling
1495 * filesystem and enables swap, then they may get a nasty shock when the
1496 * data getting swapped to that swapfile suddenly gets overwritten by
 1497 * the original zeros written out previously to the journal and
1498 * awaiting writeback in the kernel's buffer cache.
1499 *
1500 * So, if we see any bmap calls here on a modified, data-journaled file,
1501 * take extra steps to flush any blocks which might be in the cache.
1502 */
1503static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1504{
1505	struct inode *inode = mapping->host;
1506	journal_t *journal;
1507	int err;
1508
1509	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
1510		/*
1511		 * This is a REALLY heavyweight approach, but the use of
1512		 * bmap on dirty files is expected to be extremely rare:
1513		 * only if we run lilo or swapon on a freshly made file
1514		 * do we expect this to happen.
1515		 *
1516		 * (bmap requires CAP_SYS_RAWIO so this does not
1517		 * represent an unprivileged user DOS attack --- we'd be
1518		 * in trouble if mortal users could trigger this path at
1519		 * will.)
1520		 *
1521		 * NB. EXT3_STATE_JDATA is not set on files other than
1522		 * regular files.  If somebody wants to bmap a directory
1523		 * or symlink and gets confused because the buffer
1524		 * hasn't yet been flushed to disk, they deserve
1525		 * everything they get.
1526		 */
1527
1528		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
1529		journal = EXT3_JOURNAL(inode);
1530		journal_lock_updates(journal);
1531		err = journal_flush(journal);
1532		journal_unlock_updates(journal);
1533
1534		if (err)
1535			return 0;
1536	}
1537
1538	return generic_block_bmap(mapping,block,ext3_get_block);
1539}
1540
1541static int bget_one(handle_t *handle, struct buffer_head *bh)
1542{
1543	get_bh(bh);
1544	return 0;
1545}
1546
1547static int bput_one(handle_t *handle, struct buffer_head *bh)
1548{
1549	put_bh(bh);
1550	return 0;
1551}
1552
1553static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1554{
1555	return !buffer_mapped(bh);
1556}
1557
1558/*
1559 * Note that we always start a transaction even if we're not journalling
1560 * data.  This is to preserve ordering: any hole instantiation within
1561 * __block_write_full_page -> ext3_get_block() should be journalled
1562 * along with the data so we don't crash and then get metadata which
1563 * refers to old data.
1564 *
1565 * In all journalling modes block_write_full_page() will start the I/O.
1566 *
1567 * Problem:
1568 *
1569 *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1570 *		ext3_writepage()
1571 *
1572 * Similar for:
1573 *
1574 *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1575 *
1576 * Same applies to ext3_get_block().  We will deadlock on various things like
1577 * lock_journal and i_truncate_mutex.
1578 *
1579 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1580 * allocations fail.
1581 *
1582 * 16May01: If we're reentered then journal_current_handle() will be
1583 *	    non-zero. We simply *return*.
1584 *
1585 * 1 July 2001: @@@ FIXME:
1586 *   In journalled data mode, a data buffer may be metadata against the
1587 *   current transaction.  But the same file is part of a shared mapping
1588 *   and someone does a writepage() on it.
1589 *
1590 *   We will move the buffer onto the async_data list, but *after* it has
1591 *   been dirtied. So there's a small window where we have dirty data on
1592 *   BJ_Metadata.
1593 *
1594 *   Note that this only applies to the last partial page in the file.  The
1595 *   bit which block_write_full_page() uses prepare/commit for.  (That's
1596 *   broken code anyway: it's wrong for msync()).
1597 *
1598 *   It's a rare case: affects the final partial page, for journalled data
 1599 *   where the file is subject to both write() and writepage() in the same
 1600 *   transaction.  To fix it we'll need a custom block_write_full_page().
1601 *   We'll probably need that anyway for journalling writepage() output.
1602 *
1603 * We don't honour synchronous mounts for writepage().  That would be
1604 * disastrous.  Any write() or metadata operation will sync the fs for
1605 * us.
1606 *
1607 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1608 * we don't need to open a transaction here.
1609 */
1610static int ext3_ordered_writepage(struct page *page,
1611				struct writeback_control *wbc)
1612{
1613	struct inode *inode = page->mapping->host;
1614	struct buffer_head *page_bufs;
1615	handle_t *handle = NULL;
1616	int ret = 0;
1617	int err;
1618
1619	J_ASSERT(PageLocked(page));
1620	WARN_ON_ONCE(IS_RDONLY(inode));
1621
1622	/*
1623	 * We give up here if we're reentered, because it might be for a
1624	 * different filesystem.
1625	 */
1626	if (ext3_journal_current_handle())
1627		goto out_fail;
1628
1629	trace_ext3_ordered_writepage(page);
1630	if (!page_has_buffers(page)) {
1631		create_empty_buffers(page, inode->i_sb->s_blocksize,
1632				(1 << BH_Dirty)|(1 << BH_Uptodate));
1633		page_bufs = page_buffers(page);
1634	} else {
1635		page_bufs = page_buffers(page);
1636		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
1637				       NULL, buffer_unmapped)) {
1638			/* Provide NULL get_block() to catch bugs if buffers
1639			 * weren't really mapped */
1640			return block_write_full_page(page, NULL, wbc);
1641		}
1642	}
1643	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1644
1645	if (IS_ERR(handle)) {
1646		ret = PTR_ERR(handle);
1647		goto out_fail;
1648	}
1649
1650	walk_page_buffers(handle, page_bufs, 0,
1651			PAGE_CACHE_SIZE, NULL, bget_one);
1652
1653	ret = block_write_full_page(page, ext3_get_block, wbc);
1654
1655	/*
1656	 * The page can become unlocked at any point now, and
1657	 * truncate can then come in and change things.  So we
1658	 * can't touch *page from now on.  But *page_bufs is
1659	 * safe due to elevated refcount.
1660	 */
1661
1662	/*
1663	 * And attach them to the current transaction.  But only if
1664	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
1665	 * and generally junk.
1666	 */
1667	if (ret == 0) {
1668		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1669					NULL, journal_dirty_data_fn);
1670		if (!ret)
1671			ret = err;
1672	}
1673	walk_page_buffers(handle, page_bufs, 0,
1674			PAGE_CACHE_SIZE, NULL, bput_one);
1675	err = ext3_journal_stop(handle);
1676	if (!ret)
1677		ret = err;
1678	return ret;
1679
1680out_fail:
1681	redirty_page_for_writepage(wbc, page);
1682	unlock_page(page);
1683	return ret;
1684}
1685
1686static int ext3_writeback_writepage(struct page *page,
1687				struct writeback_control *wbc)
1688{
1689	struct inode *inode = page->mapping->host;
1690	handle_t *handle = NULL;
1691	int ret = 0;
1692	int err;
1693
1694	J_ASSERT(PageLocked(page));
1695	WARN_ON_ONCE(IS_RDONLY(inode));
1696
1697	if (ext3_journal_current_handle())
1698		goto out_fail;
1699
1700	trace_ext3_writeback_writepage(page);
1701	if (page_has_buffers(page)) {
1702		if (!walk_page_buffers(NULL, page_buffers(page), 0,
1703				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
1704			/* Provide NULL get_block() to catch bugs if buffers
1705			 * weren't really mapped */
1706			return block_write_full_page(page, NULL, wbc);
1707		}
1708	}
1709
1710	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1711	if (IS_ERR(handle)) {
1712		ret = PTR_ERR(handle);
1713		goto out_fail;
1714	}
1715
1716	ret = block_write_full_page(page, ext3_get_block, wbc);
1717
1718	err = ext3_journal_stop(handle);
1719	if (!ret)
1720		ret = err;
1721	return ret;
1722
1723out_fail:
1724	redirty_page_for_writepage(wbc, page);
1725	unlock_page(page);
1726	return ret;
1727}
1728
1729static int ext3_journalled_writepage(struct page *page,
1730				struct writeback_control *wbc)
1731{
1732	struct inode *inode = page->mapping->host;
1733	handle_t *handle = NULL;
1734	int ret = 0;
1735	int err;
1736
1737	J_ASSERT(PageLocked(page));
1738	WARN_ON_ONCE(IS_RDONLY(inode));
1739
1740	if (ext3_journal_current_handle())
1741		goto no_write;
1742
1743	trace_ext3_journalled_writepage(page);
1744	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1745	if (IS_ERR(handle)) {
1746		ret = PTR_ERR(handle);
1747		goto no_write;
1748	}
1749
1750	if (!page_has_buffers(page) || PageChecked(page)) {
1751		/*
1752		 * It's mmapped pagecache.  Add buffers and journal it.  There
1753		 * doesn't seem much point in redirtying the page here.
1754		 */
1755		ClearPageChecked(page);
1756		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
1757					  ext3_get_block);
1758		if (ret != 0) {
1759			ext3_journal_stop(handle);
1760			goto out_unlock;
1761		}
1762		ret = walk_page_buffers(handle, page_buffers(page), 0,
1763			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1764
1765		err = walk_page_buffers(handle, page_buffers(page), 0,
1766				PAGE_CACHE_SIZE, NULL, write_end_fn);
1767		if (ret == 0)
1768			ret = err;
1769		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1770		atomic_set(&EXT3_I(inode)->i_datasync_tid,
1771			   handle->h_transaction->t_tid);
1772		unlock_page(page);
1773	} else {
1774		/*
1775		 * It may be a page full of checkpoint-mode buffers.  We don't
1776		 * really know unless we go poke around in the buffer_heads.
1777		 * But block_write_full_page will do the right thing.
1778		 */
1779		ret = block_write_full_page(page, ext3_get_block, wbc);
1780	}
1781	err = ext3_journal_stop(handle);
1782	if (!ret)
1783		ret = err;
1784out:
1785	return ret;
1786
1787no_write:
1788	redirty_page_for_writepage(wbc, page);
1789out_unlock:
1790	unlock_page(page);
1791	goto out;
1792}
1793
1794static int ext3_readpage(struct file *file, struct page *page)
1795{
1796	trace_ext3_readpage(page);
1797	return mpage_readpage(page, ext3_get_block);
1798}
1799
1800static int
1801ext3_readpages(struct file *file, struct address_space *mapping,
1802		struct list_head *pages, unsigned nr_pages)
1803{
1804	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1805}
1806
1807static void ext3_invalidatepage(struct page *page, unsigned long offset)
1808{
1809	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1810
1811	trace_ext3_invalidatepage(page, offset);
1812
1813	/*
1814	 * If it's a full truncate we just forget about the pending dirtying
1815	 */
1816	if (offset == 0)
1817		ClearPageChecked(page);
1818
1819	journal_invalidatepage(journal, page, offset);
1820}
1821
1822static int ext3_releasepage(struct page *page, gfp_t wait)
1823{
1824	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1825
1826	trace_ext3_releasepage(page);
1827	WARN_ON(PageChecked(page));
1828	if (!page_has_buffers(page))
1829		return 0;
1830	return journal_try_to_free_buffers(journal, page, wait);
1831}
1832
1833/*
1834 * If the O_DIRECT write will extend the file then add this inode to the
1835 * orphan list.  So recovery will truncate it back to the original size
1836 * if the machine crashes during the write.
1837 *
1838 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1839 * crashes then stale disk data _may_ be exposed inside the file. But current
1840 * VFS code falls back into buffered path in that case so we are safe.
1841 */
1842static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1843			const struct iovec *iov, loff_t offset,
1844			unsigned long nr_segs)
1845{
1846	struct file *file = iocb->ki_filp;
1847	struct inode *inode = file->f_mapping->host;
1848	struct ext3_inode_info *ei = EXT3_I(inode);
1849	handle_t *handle;
1850	ssize_t ret;
1851	int orphan = 0;
1852	size_t count = iov_length(iov, nr_segs);
1853	int retries = 0;
1854
1855	trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
1856
1857	if (rw == WRITE) {
1858		loff_t final_size = offset + count;
1859
1860		if (final_size > inode->i_size) {
1861			/* Credits for sb + inode write */
1862			handle = ext3_journal_start(inode, 2);
1863			if (IS_ERR(handle)) {
1864				ret = PTR_ERR(handle);
1865				goto out;
1866			}
1867			ret = ext3_orphan_add(handle, inode);
1868			if (ret) {
1869				ext3_journal_stop(handle);
1870				goto out;
1871			}
1872			orphan = 1;
1873			ei->i_disksize = inode->i_size;
1874			ext3_journal_stop(handle);
1875		}
1876	}
1877
1878retry:
1879	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
1880				 ext3_get_block);
1881	/*
1882	 * In case of error extending write may have instantiated a few
1883	 * blocks outside i_size. Trim these off again.
1884	 */
1885	if (unlikely((rw & WRITE) && ret < 0)) {
1886		loff_t isize = i_size_read(inode);
1887		loff_t end = offset + iov_length(iov, nr_segs);
1888
1889		if (end > isize)
1890			ext3_truncate_failed_direct_write(inode);
1891	}
1892	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1893		goto retry;
1894
1895	if (orphan) {
1896		int err;
1897
1898		/* Credits for sb + inode write */
1899		handle = ext3_journal_start(inode, 2);
1900		if (IS_ERR(handle)) {
1901			/* This is really bad luck. We've written the data
1902			 * but cannot extend i_size. Truncate allocated blocks
1903			 * and pretend the write failed... */
1904			ext3_truncate_failed_direct_write(inode);
1905			ret = PTR_ERR(handle);
1906			goto out;
1907		}
1908		if (inode->i_nlink)
1909			ext3_orphan_del(handle, inode);
1910		if (ret > 0) {
1911			loff_t end = offset + ret;
1912			if (end > inode->i_size) {
1913				ei->i_disksize = end;
1914				i_size_write(inode, end);
1915				/*
1916				 * We're going to return a positive `ret'
1917				 * here due to non-zero-length I/O, so there's
1918				 * no way of reporting error returns from
1919				 * ext3_mark_inode_dirty() to userspace.  So
1920				 * ignore it.
1921				 */
1922				ext3_mark_inode_dirty(handle, inode);
1923			}
1924		}
1925		err = ext3_journal_stop(handle);
1926		if (ret == 0)
1927			ret = err;
1928	}
1929out:
1930	trace_ext3_direct_IO_exit(inode, offset,
1931				iov_length(iov, nr_segs), rw, ret);
1932	return ret;
1933}
1934
1935/*
1936 * Pages can be marked dirty completely asynchronously from ext3's journalling
1937 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1938 * much here because ->set_page_dirty is called under VFS locks.  The page is
1939 * not necessarily locked.
1940 *
1941 * We cannot just dirty the page and leave attached buffers clean, because the
1942 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1943 * or jbddirty because all the journalling code will explode.
1944 *
1945 * So what we do is to mark the page "pending dirty" and next time writepage
1946 * is called, propagate that into the buffers appropriately.
1947 */
1948static int ext3_journalled_set_page_dirty(struct page *page)
1949{
1950	SetPageChecked(page);
1951	return __set_page_dirty_nobuffers(page);
1952}
1953
1954static const struct address_space_operations ext3_ordered_aops = {
1955	.readpage		= ext3_readpage,
1956	.readpages		= ext3_readpages,
1957	.writepage		= ext3_ordered_writepage,
1958	.write_begin		= ext3_write_begin,
1959	.write_end		= ext3_ordered_write_end,
1960	.bmap			= ext3_bmap,
1961	.invalidatepage		= ext3_invalidatepage,
1962	.releasepage		= ext3_releasepage,
1963	.direct_IO		= ext3_direct_IO,
1964	.migratepage		= buffer_migrate_page,
1965	.is_partially_uptodate  = block_is_partially_uptodate,
1966	.error_remove_page	= generic_error_remove_page,
1967};
1968
1969static const struct address_space_operations ext3_writeback_aops = {
1970	.readpage		= ext3_readpage,
1971	.readpages		= ext3_readpages,
1972	.writepage		= ext3_writeback_writepage,
1973	.write_begin		= ext3_write_begin,
1974	.write_end		= ext3_writeback_write_end,
1975	.bmap			= ext3_bmap,
1976	.invalidatepage		= ext3_invalidatepage,
1977	.releasepage		= ext3_releasepage,
1978	.direct_IO		= ext3_direct_IO,
1979	.migratepage		= buffer_migrate_page,
1980	.is_partially_uptodate  = block_is_partially_uptodate,
1981	.error_remove_page	= generic_error_remove_page,
1982};
1983
1984static const struct address_space_operations ext3_journalled_aops = {
1985	.readpage		= ext3_readpage,
1986	.readpages		= ext3_readpages,
1987	.writepage		= ext3_journalled_writepage,
1988	.write_begin		= ext3_write_begin,
1989	.write_end		= ext3_journalled_write_end,
1990	.set_page_dirty		= ext3_journalled_set_page_dirty,
1991	.bmap			= ext3_bmap,
1992	.invalidatepage		= ext3_invalidatepage,
1993	.releasepage		= ext3_releasepage,
1994	.is_partially_uptodate  = block_is_partially_uptodate,
1995	.error_remove_page	= generic_error_remove_page,
1996};
1997
1998void ext3_set_aops(struct inode *inode)
1999{
2000	if (ext3_should_order_data(inode))
2001		inode->i_mapping->a_ops = &ext3_ordered_aops;
2002	else if (ext3_should_writeback_data(inode))
2003		inode->i_mapping->a_ops = &ext3_writeback_aops;
2004	else
2005		inode->i_mapping->a_ops = &ext3_journalled_aops;
2006}
2007
2008/*
2009 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
2010 * up to the end of the block which corresponds to `from'.
2011 * This is required during truncate. We need to physically zero the tail end
2012 * of that block so it doesn't yield old data if the file is later grown.
2013 */
2014static int ext3_block_truncate_page(struct inode *inode, loff_t from)
2015{
2016	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2017	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2018	unsigned blocksize, iblock, length, pos;
2019	struct page *page;
2020	handle_t *handle = NULL;
2021	struct buffer_head *bh;
2022	int err = 0;
2023
2024	/* Truncated on block boundary - nothing to do */
2025	blocksize = inode->i_sb->s_blocksize;
2026	if ((from & (blocksize - 1)) == 0)
2027		return 0;
2028
2029	page = grab_cache_page(inode->i_mapping, index);
2030	if (!page)
2031		return -ENOMEM;
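	/* Number of bytes to zero: from `from' up to the end of its block */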
2032	length = blocksize - (offset & (blocksize - 1));
2033	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2034
2035	if (!page_has_buffers(page))
2036		create_empty_buffers(page, blocksize, 0);
2037
2038	/* Find the buffer that contains "offset" */
2039	bh = page_buffers(page);
2040	pos = blocksize;
2041	while (offset >= pos) {
2042		bh = bh->b_this_page;
2043		iblock++;
2044		pos += blocksize;
2045	}
2046
2047	err = 0;
2048	if (buffer_freed(bh)) {
2049		BUFFER_TRACE(bh, "freed: skip");
2050		goto unlock;
2051	}
2052
2053	if (!buffer_mapped(bh)) {
2054		BUFFER_TRACE(bh, "unmapped");
2055		ext3_get_block(inode, iblock, bh, 0);
2056		/* unmapped? It's a hole - nothing to do */
2057		if (!buffer_mapped(bh)) {
2058			BUFFER_TRACE(bh, "still unmapped");
2059			goto unlock;
2060		}
2061	}
2062
2063	/* Ok, it's mapped. Make sure it's up-to-date */
2064	if (PageUptodate(page))
2065		set_buffer_uptodate(bh);
2066
2067	if (!buffer_uptodate(bh)) {
2068		err = -EIO;
2069		ll_rw_block(READ, 1, &bh);
2070		wait_on_buffer(bh);
2071		/* Uhhuh. Read error. Complain and punt. */
2072		if (!buffer_uptodate(bh))
2073			goto unlock;
2074	}
2075
2076	/* data=writeback mode doesn't need transaction to zero-out data */
2077	if (!ext3_should_writeback_data(inode)) {
2078		/* We journal at most one block */
2079		handle = ext3_journal_start(inode, 1);
2080		if (IS_ERR(handle)) {
2081			clear_highpage(page);
2082			flush_dcache_page(page);
2083			err = PTR_ERR(handle);
2084			goto unlock;
2085		}
2086	}
2087
2088	if (ext3_should_journal_data(inode)) {
2089		BUFFER_TRACE(bh, "get write access");
2090		err = ext3_journal_get_write_access(handle, bh);
2091		if (err)
2092			goto stop;
2093	}
2094
2095	zero_user(page, offset, length);
2096	BUFFER_TRACE(bh, "zeroed end of block");
2097
2098	err = 0;
2099	if (ext3_should_journal_data(inode)) {
2100		err = ext3_journal_dirty_metadata(handle, bh);
2101	} else {
2102		if (ext3_should_order_data(inode))
2103			err = ext3_journal_dirty_data(handle, bh);
2104		mark_buffer_dirty(bh);
2105	}
2106stop:
2107	if (handle)
2108		ext3_journal_stop(handle);
2109
2110unlock:
2111	unlock_page(page);
2112	page_cache_release(page);
2113	return err;
2114}
2115
2116/*
2117 * Probably it should be a library function... search for first non-zero word
2118 * or memcmp with zero_page, whatever is better for particular architecture.
2119 * Linus?
2120 */
2121static inline int all_zeroes(__le32 *p, __le32 *q)
2122{
2123	while (p < q)
2124		if (*p++)
2125			return 0;
2126	return 1;
2127}
2128
2129/**
2130 *	ext3_find_shared - find the indirect blocks for partial truncation.
2131 *	@inode:	  inode in question
2132 *	@depth:	  depth of the affected branch
2133 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
2134 *	@chain:	  place to store the pointers to partial indirect blocks
2135 *	@top:	  place to the (detached) top of branch
2136 *
2137 *	This is a helper function used by ext3_truncate().
2138 *
2139 *	When we do truncate() we may have to clean the ends of several
2140 *	indirect blocks but leave the blocks themselves alive. Block is
2141 *	partially truncated if some data below the new i_size is referred
2142 *	from it (and it is on the path to the first completely truncated
2143 *	data block, indeed).  We have to free the top of that path along
2144 *	with everything to the right of the path. Since no allocation
2145 *	past the truncation point is possible until ext3_truncate()
2146 *	finishes, we may safely do the latter, but top of branch may
2147 *	require special attention - pageout below the truncation point
2148 *	might try to populate it.
2149 *
2150 *	We atomically detach the top of branch from the tree, store the
2151 *	block number of its root in *@top, pointers to buffer_heads of
2152 *	partially truncated blocks - in @chain[].bh and pointers to
2153 *	their last elements that should not be removed - in
2154 *	@chain[].p. Return value is the pointer to last filled element
2155 *	of @chain.
2156 *
2157 *	The work left to caller to do the actual freeing of subtrees:
2158 *		a) free the subtree starting from *@top
2159 *		b) free the subtrees whose roots are stored in
2160 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2161 *		c) free the subtrees growing from the inode past the @chain[0].
2162 *			(no partially truncated stuff there).  */
2163
2164static Indirect *ext3_find_shared(struct inode *inode, int depth,
2165			int offsets[4], Indirect chain[4], __le32 *top)
2166{
2167	Indirect *partial, *p;
2168	int k, err;
2169
2170	*top = 0;
2171	/* Make k index the deepest non-null offset + 1 */
2172	for (k = depth; k > 1 && !offsets[k-1]; k--)
2173		;
2174	partial = ext3_get_branch(inode, k, offsets, chain, &err);
2175	/* Writer: pointers */
2176	if (!partial)
2177		partial = chain + k-1;
2178	/*
2179	 * If the branch acquired continuation since we've looked at it -
2180	 * fine, it should all survive and (new) top doesn't belong to us.
2181	 */
2182	if (!partial->key && *partial->p)
2183		/* Writer: end */
2184		goto no_top;
2185	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2186		;
2187	/*
2188	 * OK, we've found the last block that must survive. The rest of our
2189	 * branch should be detached before unlocking. However, if that rest
2190	 * of branch is all ours and does not grow immediately from the inode
2191	 * it's easier to cheat and just decrement partial->p.
2192	 */
2193	if (p == chain + k - 1 && p > chain) {
2194		p->p--;
2195	} else {
2196		*top = *p->p;
2197		/* Nope, don't do this in ext3.  Must leave the tree intact */
2198#if 0
2199		*p->p = 0;
2200#endif
2201	}
2202	/* Writer: end */
2203
2204	while(partial > p) {
2205		brelse(partial->bh);
2206		partial--;
2207	}
2208no_top:
2209	return partial;
2210}
2211
2212/*
2213 * Zero a number of block pointers in either an inode or an indirect block.
2214 * If we restart the transaction we must again get write access to the
2215 * indirect block for further modification.
2216 *
2217 * We release `count' blocks on disk, but (last - first) may be greater
2218 * than `count' because there can be holes in there.
2219 */
2220static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2221		struct buffer_head *bh, ext3_fsblk_t block_to_free,
2222		unsigned long count, __le32 *first, __le32 *last)
2223{
2224	__le32 *p;
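	/*
	 * If the handle is low on credits and cannot be extended, flush the
	 * metadata updates made so far and restart the transaction before
	 * freeing more blocks.
	 */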
2225	if (try_to_extend_transaction(handle, inode)) {
2226		if (bh) {
2227			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2228			if (ext3_journal_dirty_metadata(handle, bh))
2229				return;
2230		}
2231		ext3_mark_inode_dirty(handle, inode);
2232		truncate_restart_transaction(handle, inode);
2233		if (bh) {
2234			BUFFER_TRACE(bh, "retaking write access");
2235			if (ext3_journal_get_write_access(handle, bh))
2236				return;
2237		}
2238	}
2239
2240	/*
2241	 * Any buffers which are on the journal will be in memory. We find
2242	 * them on the hash table so journal_revoke() will run journal_forget()
2243	 * on them.  We've already detached each block from the file, so
2244	 * bforget() in journal_forget() should be safe.
2245	 *
2246	 * AKPM: turn on bforget in journal_forget()!!!
2247	 */
2248	for (p = first; p < last; p++) {
2249		u32 nr = le32_to_cpu(*p);
2250		if (nr) {
2251			struct buffer_head *bh;
2252
2253			*p = 0;
2254			bh = sb_find_get_block(inode->i_sb, nr);
2255			ext3_forget(handle, 0, inode, bh, nr);
2256		}
2257	}
2258
2259	ext3_free_blocks(handle, inode, block_to_free, count);
2260}
2261
2262/**
2263 * ext3_free_data - free a list of data blocks
2264 * @handle:	handle for this transaction
2265 * @inode:	inode we are dealing with
2266 * @this_bh:	indirect buffer_head which contains *@first and *@last
2267 * @first:	array of block numbers
2268 * @last:	points immediately past the end of array
2269 *
2270 * We are freeing all blocks referred from that array (numbers are stored as
2271 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2272 *
2273 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2274 * blocks are contiguous then releasing them at one time will only affect one
2275 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2276 * actually use a lot of journal space.
2277 *
2278 * @this_bh will be %NULL if @first and @last point into the inode's direct
2279 * block pointers.
2280 */
2281static void ext3_free_data(handle_t *handle, struct inode *inode,
2282			   struct buffer_head *this_bh,
2283			   __le32 *first, __le32 *last)
2284{
2285	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2286	unsigned long count = 0;	    /* Number of blocks in the run */
2287	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
2288					       corresponding to
2289					       block_to_free */
2290	ext3_fsblk_t nr;		    /* Current block # */
2291	__le32 *p;			    /* Pointer into inode/ind
2292					       for current block */
2293	int err;
2294
2295	if (this_bh) {				/* For indirect block */
2296		BUFFER_TRACE(this_bh, "get_write_access");
2297		err = ext3_journal_get_write_access(handle, this_bh);
2298		/* Important: if we can't update the indirect pointers
2299		 * to the blocks, we can't free them. */
2300		if (err)
2301			return;
2302	}
2303
2304	for (p = first; p < last; p++) {
2305		nr = le32_to_cpu(*p);
2306		if (nr) {
2307			/* accumulate blocks to free if they're contiguous */
2308			if (count == 0) {
2309				block_to_free = nr;
2310				block_to_free_p = p;
2311				count = 1;
2312			} else if (nr == block_to_free + count) {
2313				count++;
2314			} else {
2315				ext3_clear_blocks(handle, inode, this_bh,
2316						  block_to_free,
2317						  count, block_to_free_p, p);
2318				block_to_free = nr;
2319				block_to_free_p = p;
2320				count = 1;
2321			}
2322		}
2323	}
2324
2325	if (count > 0)
2326		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2327				  count, block_to_free_p, p);
2328
2329	if (this_bh) {
2330		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2331
2332		/*
2333		 * The buffer head should have an attached journal head at this
2334		 * point. However, if the data is corrupted and an indirect
2335		 * block pointed to itself, it would have been detached when
2336		 * the block was cleared. Check for this instead of OOPSing.
2337		 */
2338		if (bh2jh(this_bh))
2339			ext3_journal_dirty_metadata(handle, this_bh);
2340		else
2341			ext3_error(inode->i_sb, "ext3_free_data",
2342				   "circular indirect block detected, "
2343				   "inode=%lu, block=%llu",
2344				   inode->i_ino,
2345				   (unsigned long long)this_bh->b_blocknr);
2346	}
2347}
2348
2349/**
2350 *	ext3_free_branches - free an array of branches
2351 *	@handle: JBD handle for this transaction
2352 *	@inode:	inode we are dealing with
2353 *	@parent_bh: the buffer_head which contains *@first and *@last
2354 *	@first:	array of block numbers
2355 *	@last:	pointer immediately past the end of array
2356 *	@depth:	depth of the branches to free
2357 *
2358 *	We are freeing all blocks referred from these branches (numbers are
2359 *	stored as little-endian 32-bit) and updating @inode->i_blocks
2360 *	appropriately.
2361 */
2362static void ext3_free_branches(handle_t *handle, struct inode *inode,
2363			       struct buffer_head *parent_bh,
2364			       __le32 *first, __le32 *last, int depth)
2365{
2366	ext3_fsblk_t nr;
2367	__le32 *p;
2368
2369	if (is_handle_aborted(handle))
2370		return;
2371
2372	if (depth--) {
2373		struct buffer_head *bh;
2374		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2375		p = last;
2376		while (--p >= first) {
2377			nr = le32_to_cpu(*p);
2378			if (!nr)
2379				continue;		/* A hole */
2380
2381			/* Go read the buffer for the next level down */
2382			bh = sb_bread(inode->i_sb, nr);
2383
2384			/*
2385			 * A read failure? Report error and clear slot
2386			 * (should be rare).
2387			 */
2388			if (!bh) {
2389				ext3_error(inode->i_sb, "ext3_free_branches",
2390					   "Read failure, inode=%lu, block="E3FSBLK,
2391					   inode->i_ino, nr);
2392				continue;
2393			}
2394
2395			/* This zaps the entire block.  Bottom up. */
2396			BUFFER_TRACE(bh, "free child branches");
2397			ext3_free_branches(handle, inode, bh,
2398					   (__le32*)bh->b_data,
2399					   (__le32*)bh->b_data + addr_per_block,
2400					   depth);
2401
2402			/*
2403			 * Everything below this pointer has been
2404			 * released.  Now let this top-of-subtree go.
2405			 *
2406			 * We want the freeing of this indirect block to be
2407			 * atomic in the journal with the updating of the
2408			 * bitmap block which owns it.  So make some room in
2409			 * the journal.
2410			 *
2411			 * We zero the parent pointer *after* freeing its
2412			 * pointee in the bitmaps, so if extend_transaction()
2413			 * for some reason fails to put the bitmap changes and
2414			 * the release into the same transaction, recovery
2415			 * will merely complain about releasing a free block,
2416			 * rather than leaking blocks.
2417			 */
2418			if (is_handle_aborted(handle))
2419				return;
2420			if (try_to_extend_transaction(handle, inode)) {
2421				ext3_mark_inode_dirty(handle, inode);
2422				truncate_restart_transaction(handle, inode);
2423			}
2424
2425			/*
2426			 * We've probably journalled the indirect block several
2427			 * times during the truncate.  But it's no longer
2428			 * needed and we now drop it from the transaction via
2429			 * journal_revoke().
2430			 *
2431			 * That's easy if it's exclusively part of this
2432			 * transaction.  But if it's part of the committing
2433			 * transaction then journal_forget() will simply
2434			 * brelse() it.  That means that if the underlying
2435			 * block is reallocated in ext3_get_block(),
2436			 * unmap_underlying_metadata() will find this block
2437			 * and will try to get rid of it.  damn, damn. Thus
2438			 * we don't allow a block to be reallocated until
2439			 * a transaction freeing it has fully committed.
2440			 *
2441			 * We also have to make sure journal replay after a
2442			 * crash does not overwrite non-journaled data blocks
2443			 * with old metadata when the block got reallocated for
2444			 * data.  Thus we have to store a revoke record for a
2445			 * block in the same transaction in which we free the
2446			 * block.
2447			 */
2448			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2449
2450			ext3_free_blocks(handle, inode, nr, 1);
2451
2452			if (parent_bh) {
2453				/*
2454				 * The block which we have just freed is
2455				 * pointed to by an indirect block: journal it
2456				 */
2457				BUFFER_TRACE(parent_bh, "get_write_access");
2458				if (!ext3_journal_get_write_access(handle,
2459								   parent_bh)){
2460					*p = 0;
2461					BUFFER_TRACE(parent_bh,
2462					"call ext3_journal_dirty_metadata");
2463					ext3_journal_dirty_metadata(handle,
2464								    parent_bh);
2465				}
2466			}
2467		}
2468	} else {
2469		/* We have reached the bottom of the tree. */
2470		BUFFER_TRACE(parent_bh, "free data blocks");
2471		ext3_free_data(handle, inode, parent_bh, first, last);
2472	}
2473}
2474
2475int ext3_can_truncate(struct inode *inode)
2476{
2477	if (S_ISREG(inode->i_mode))
2478		return 1;
2479	if (S_ISDIR(inode->i_mode))
2480		return 1;
2481	if (S_ISLNK(inode->i_mode))
2482		return !ext3_inode_is_fast_symlink(inode);
2483	return 0;
2484}
2485
2486/*
2487 * ext3_truncate()
2488 *
2489 * We block out ext3_get_block() block instantiations across the entire
2490 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2491 * simultaneously on behalf of the same inode.
2492 *
2493 * As we work through the truncate and commit bits of it to the journal there
2494 * is one core, guiding principle: the file's tree must always be consistent on
2495 * disk.  We must be able to restart the truncate after a crash.
2496 *
2497 * The file's tree may be transiently inconsistent in memory (although it
2498 * probably isn't), but whenever we close off and commit a journal transaction,
2499 * the contents of (the filesystem + the journal) must be consistent and
2500 * restartable.  It's pretty simple, really: bottom up, right to left (although
2501 * left-to-right works OK too).
2502 *
2503 * Note that at recovery time, journal replay occurs *before* the restart of
2504 * truncate against the orphan inode list.
2505 *
2506 * The committed inode has the new, desired i_size (which is the same as
2507 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2508 * that this inode's truncate did not complete and it will again call
2509 * ext3_truncate() to have another go.  So there will be instantiated blocks
2510 * to the right of the truncation point in a crashed ext3 filesystem.  But
2511 * that's fine - as long as they are linked from the inode, the post-crash
2512 * ext3_truncate() run will find them and release them.
2513 */
2514void ext3_truncate(struct inode *inode)
2515{
2516	handle_t *handle;
2517	struct ext3_inode_info *ei = EXT3_I(inode);
2518	__le32 *i_data = ei->i_data;
2519	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2520	int offsets[4];
2521	Indirect chain[4];
2522	Indirect *partial;
2523	__le32 nr = 0;
2524	int n;
2525	long last_block;
2526	unsigned blocksize = inode->i_sb->s_blocksize;
2527
2528	trace_ext3_truncate_enter(inode);
2529
2530	if (!ext3_can_truncate(inode))
2531		goto out_notrans;
2532
2533	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
2534		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
2535
2536	handle = start_transaction(inode);
2537	if (IS_ERR(handle))
2538		goto out_notrans;
2539
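	/* Index of the first block lying wholly beyond the new i_size */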
2540	last_block = (inode->i_size + blocksize-1)
2541					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2542	n = ext3_block_to_path(inode, last_block, offsets, NULL);
2543	if (n == 0)
2544		goto out_stop;	/* error */
2545
2546	/*
2547	 * OK.  This truncate is going to happen.  We add the inode to the
2548	 * orphan list, so that if this truncate spans multiple transactions,
2549	 * and we crash, we will resume the truncate when the filesystem
2550	 * recovers.  It also marks the inode dirty, to catch the new size.
2551	 *
2552	 * Implication: the file must always be in a sane, consistent
2553	 * truncatable state while each transaction commits.
2554	 */
2555	if (ext3_orphan_add(handle, inode))
2556		goto out_stop;
2557
2558	/*
2559	 * The orphan list entry will now protect us from any crash which
2560	 * occurs before the truncate completes, so it is now safe to propagate
2561	 * the new, shorter inode size (held for now in i_size) into the
2562	 * on-disk inode. We do this via i_disksize, which is the value which
2563	 * ext3 *really* writes onto the disk inode.
2564	 */
2565	ei->i_disksize = inode->i_size;
2566
2567	/*
2568	 * From here we block out all ext3_get_block() callers who want to
2569	 * modify the block allocation tree.
2570	 */
2571	mutex_lock(&ei->truncate_mutex);
2572
2573	if (n == 1) {		/* direct blocks */
2574		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2575			       i_data + EXT3_NDIR_BLOCKS);
2576		goto do_indirects;
2577	}
2578
2579	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2580	/* Kill the top of shared branch (not detached) */
2581	if (nr) {
2582		if (partial == chain) {
2583			/* Shared branch grows from the inode */
2584			ext3_free_branches(handle, inode, NULL,
2585					   &nr, &nr+1, (chain+n-1) - partial);
2586			*partial->p = 0;
2587			/*
2588			 * We mark the inode dirty prior to restart,
2589			 * and prior to stop.  No need for it here.
2590			 */
2591		} else {
2592			/* Shared branch grows from an indirect block */
2593			ext3_free_branches(handle, inode, partial->bh,
2594					partial->p,
2595					partial->p+1, (chain+n-1) - partial);
2596		}
2597	}
2598	/* Clear the ends of indirect blocks on the shared branch */
2599	while (partial > chain) {
2600		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2601				   (__le32*)partial->bh->b_data+addr_per_block,
2602				   (chain+n-1) - partial);
2603		BUFFER_TRACE(partial->bh, "call brelse");
2604		brelse (partial->bh);
2605		partial--;
2606	}
2607do_indirects:
2608	/* Kill the remaining (whole) subtrees */
2609	switch (offsets[0]) {
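	/*
	 * Intentional fall-through: each whole indirect tree lying entirely
	 * beyond the truncation point is freed; the partially truncated
	 * branch was handled above.
	 */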
2610	default:
2611		nr = i_data[EXT3_IND_BLOCK];
2612		if (nr) {
2613			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2614			i_data[EXT3_IND_BLOCK] = 0;
2615		}
2616	case EXT3_IND_BLOCK:
2617		nr = i_data[EXT3_DIND_BLOCK];
2618		if (nr) {
2619			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2620			i_data[EXT3_DIND_BLOCK] = 0;
2621		}
2622	case EXT3_DIND_BLOCK:
2623		nr = i_data[EXT3_TIND_BLOCK];
2624		if (nr) {
2625			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2626			i_data[EXT3_TIND_BLOCK] = 0;
2627		}
2628	case EXT3_TIND_BLOCK:
2629		;
2630	}
2631
2632	ext3_discard_reservation(inode);
2633
2634	mutex_unlock(&ei->truncate_mutex);
2635	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2636	ext3_mark_inode_dirty(handle, inode);
2637
2638	/*
2639	 * In a multi-transaction truncate, we only make the final transaction
2640	 * synchronous
2641	 */
2642	if (IS_SYNC(inode))
2643		handle->h_sync = 1;
2644out_stop:
2645	/*
2646	 * If this was a simple ftruncate(), and the file will remain alive
2647	 * then we need to clear up the orphan record which we created above.
2648	 * However, if this was a real unlink then we were called by
2649	 * ext3_evict_inode(), and we allow that function to clean up the
2650	 * orphan info for us.
2651	 */
2652	if (inode->i_nlink)
2653		ext3_orphan_del(handle, inode);
2654
2655	ext3_journal_stop(handle);
2656	trace_ext3_truncate_exit(inode);
2657	return;
2658out_notrans:
2659	/*
2660	 * Delete the inode from orphan list so that it doesn't stay there
2661	 * forever and trigger assertion on umount.
2662	 */
2663	if (inode->i_nlink)
2664		ext3_orphan_del(NULL, inode);
2665	trace_ext3_truncate_exit(inode);
2666}
2667
2668static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2669		unsigned long ino, struct ext3_iloc *iloc)
2670{
2671	unsigned long block_group;
2672	unsigned long offset;
2673	ext3_fsblk_t block;
2674	struct ext3_group_desc *gdp;
2675
2676	if (!ext3_valid_inum(sb, ino)) {
2677		/*
2678		 * This error is already checked for in namei.c unless we are
2679		 * looking at an NFS filehandle, in which case no error
2680		 * report is needed
2681		 */
2682		return 0;
2683	}
2684
2685	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2686	gdp = ext3_get_group_desc(sb, block_group, NULL);
2687	if (!gdp)
2688		return 0;
2689	/*
2690	 * Figure out the offset within the block group inode table
2691	 */
2692	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2693		EXT3_INODE_SIZE(sb);
2694	block = le32_to_cpu(gdp->bg_inode_table) +
2695		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
2696
2697	iloc->block_group = block_group;
2698	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2699	return block;
2700}
2701
2702/*
2703 * ext3_get_inode_loc returns with an extra refcount against the inode's
2704 * underlying buffer_head on success. If 'in_mem' is true, we have all
2705 * data in memory that is needed to recreate the on-disk version of this
2706 * inode.
2707 */
2708static int __ext3_get_inode_loc(struct inode *inode,
2709				struct ext3_iloc *iloc, int in_mem)
2710{
2711	ext3_fsblk_t block;
2712	struct buffer_head *bh;
2713
2714	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2715	if (!block)
2716		return -EIO;
2717
2718	bh = sb_getblk(inode->i_sb, block);
2719	if (!bh) {
2720		ext3_error (inode->i_sb, "ext3_get_inode_loc",
2721				"unable to read inode block - "
2722				"inode=%lu, block="E3FSBLK,
2723				 inode->i_ino, block);
2724		return -EIO;
2725	}
2726	if (!buffer_uptodate(bh)) {
2727		lock_buffer(bh);
2728
2729		/*
2730		 * If the buffer has the write error flag, we have failed
2731		 * to write out another inode in the same block.  In this
2732		 * case, we don't have to read the block because we may
2733		 * read the old inode data successfully.
2734		 */
2735		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2736			set_buffer_uptodate(bh);
2737
2738		if (buffer_uptodate(bh)) {
2739			/* someone brought it uptodate while we waited */
2740			unlock_buffer(bh);
2741			goto has_buffer;
2742		}
2743
2744		/*
2745		 * If we have all information of the inode in memory and this
2746		 * is the only valid inode in the block, we need not read the
2747		 * block.
2748		 */
2749		if (in_mem) {
2750			struct buffer_head *bitmap_bh;
2751			struct ext3_group_desc *desc;
2752			int inodes_per_buffer;
2753			int inode_offset, i;
2754			int block_group;
2755			int start;
2756
2757			block_group = (inode->i_ino - 1) /
2758					EXT3_INODES_PER_GROUP(inode->i_sb);
2759			inodes_per_buffer = bh->b_size /
2760				EXT3_INODE_SIZE(inode->i_sb);
2761			inode_offset = ((inode->i_ino - 1) %
2762					EXT3_INODES_PER_GROUP(inode->i_sb));
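			/*
			 * Round down to the first inode slot in this buffer;
			 * inodes_per_buffer is a power of two, so the mask
			 * gives the start of the buffer's inode range.
			 */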
2763			start = inode_offset & ~(inodes_per_buffer - 1);
2764
2765			/* Is the inode bitmap in cache? */
2766			desc = ext3_get_group_desc(inode->i_sb,
2767						block_group, NULL);
2768			if (!desc)
2769				goto make_io;
2770
2771			bitmap_bh = sb_getblk(inode->i_sb,
2772					le32_to_cpu(desc->bg_inode_bitmap));
2773			if (!bitmap_bh)
2774				goto make_io;
2775
2776			/*
2777			 * If the inode bitmap isn't in cache then the
2778			 * optimisation may end up performing two reads instead
2779			 * of one, so skip it.
2780			 */
2781			if (!buffer_uptodate(bitmap_bh)) {
2782				brelse(bitmap_bh);
2783				goto make_io;
2784			}
2785			for (i = start; i < start + inodes_per_buffer; i++) {
2786				if (i == inode_offset)
2787					continue;
2788				if (ext3_test_bit(i, bitmap_bh->b_data))
2789					break;
2790			}
2791			brelse(bitmap_bh);
2792			if (i == start + inodes_per_buffer) {
2793				/* all other inodes are free, so skip I/O */
2794				memset(bh->b_data, 0, bh->b_size);
2795				set_buffer_uptodate(bh);
2796				unlock_buffer(bh);
2797				goto has_buffer;
2798			}
2799		}
2800
2801make_io:
2802		/*
2803		 * There are other valid inodes in the buffer, this inode
2804		 * has in-inode xattrs, or we don't have this inode in memory.
2805		 * Read the block from disk.
2806		 */
2807		trace_ext3_load_inode(inode);
2808		get_bh(bh);
2809		bh->b_end_io = end_buffer_read_sync;
2810		submit_bh(READ | REQ_META | REQ_PRIO, bh);
2811		wait_on_buffer(bh);
2812		if (!buffer_uptodate(bh)) {
2813			ext3_error(inode->i_sb, "ext3_get_inode_loc",
2814					"unable to read inode block - "
2815					"inode=%lu, block="E3FSBLK,
2816					inode->i_ino, block);
2817			brelse(bh);
2818			return -EIO;
2819		}
2820	}
2821has_buffer:
2822	iloc->bh = bh;
2823	return 0;
2824}
2825
2826int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2827{
2828	/* We have all inode data except xattrs in memory here. */
2829	return __ext3_get_inode_loc(inode, iloc,
2830		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
2831}
2832
2833void ext3_set_inode_flags(struct inode *inode)
2834{
2835	unsigned int flags = EXT3_I(inode)->i_flags;
2836
2837	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2838	if (flags & EXT3_SYNC_FL)
2839		inode->i_flags |= S_SYNC;
2840	if (flags & EXT3_APPEND_FL)
2841		inode->i_flags |= S_APPEND;
2842	if (flags & EXT3_IMMUTABLE_FL)
2843		inode->i_flags |= S_IMMUTABLE;
2844	if (flags & EXT3_NOATIME_FL)
2845		inode->i_flags |= S_NOATIME;
2846	if (flags & EXT3_DIRSYNC_FL)
2847		inode->i_flags |= S_DIRSYNC;
2848}
2849
2850/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2851void ext3_get_inode_flags(struct ext3_inode_info *ei)
2852{
2853	unsigned int flags = ei->vfs_inode.i_flags;
2854
2855	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2856			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2857	if (flags & S_SYNC)
2858		ei->i_flags |= EXT3_SYNC_FL;
2859	if (flags & S_APPEND)
2860		ei->i_flags |= EXT3_APPEND_FL;
2861	if (flags & S_IMMUTABLE)
2862		ei->i_flags |= EXT3_IMMUTABLE_FL;
2863	if (flags & S_NOATIME)
2864		ei->i_flags |= EXT3_NOATIME_FL;
2865	if (flags & S_DIRSYNC)
2866		ei->i_flags |= EXT3_DIRSYNC_FL;
2867}
2868
2869struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2870{
2871	struct ext3_iloc iloc;
2872	struct ext3_inode *raw_inode;
2873	struct ext3_inode_info *ei;
2874	struct buffer_head *bh;
2875	struct inode *inode;
2876	journal_t *journal = EXT3_SB(sb)->s_journal;
2877	transaction_t *transaction;
2878	long ret;
2879	int block;
2880
2881	inode = iget_locked(sb, ino);
2882	if (!inode)
2883		return ERR_PTR(-ENOMEM);
2884	if (!(inode->i_state & I_NEW))
2885		return inode;
2886
2887	ei = EXT3_I(inode);
2888	ei->i_block_alloc_info = NULL;
2889
2890	ret = __ext3_get_inode_loc(inode, &iloc, 0);
2891	if (ret < 0)
2892		goto bad_inode;
2893	bh = iloc.bh;
2894	raw_inode = ext3_raw_inode(&iloc);
2895	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2896	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2897	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2898	if(!(test_opt (inode->i_sb, NO_UID32))) {
2899		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2900		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2901	}
2902	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2903	inode->i_size = le32_to_cpu(raw_inode->i_size);
2904	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2905	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2906	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2907	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2908
2909	ei->i_state_flags = 0;
2910	ei->i_dir_start_lookup = 0;
2911	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2912	/* We now have enough fields to check if the inode was active or not.
2913	 * This is needed because nfsd might try to access dead inodes
2914	 * the test is the same one that e2fsck uses
2915	 * NeilBrown 1999oct15
2916	 */
2917	if (inode->i_nlink == 0) {
2918		if (inode->i_mode == 0 ||
2919		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2920			/* this inode is deleted */
2921			brelse (bh);
2922			ret = -ESTALE;
2923			goto bad_inode;
2924		}
2925		/* The only unlinked inodes we let through here have
2926		 * valid i_mode and are being read by the orphan
2927		 * recovery code: that's fine, we're about to complete
2928		 * the process of deleting those. */
2929	}
2930	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2931	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2932#ifdef EXT3_FRAGMENTS
2933	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2934	ei->i_frag_no = raw_inode->i_frag;
2935	ei->i_frag_size = raw_inode->i_fsize;
2936#endif
2937	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2938	if (!S_ISREG(inode->i_mode)) {
2939		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2940	} else {
2941		inode->i_size |=
2942			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2943	}
2944	ei->i_disksize = inode->i_size;
2945	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2946	ei->i_block_group = iloc.block_group;
2947	/*
2948	 * NOTE! The in-memory inode i_data array is in little-endian order
2949	 * even on big-endian machines: we do NOT byteswap the block numbers!
2950	 */
2951	for (block = 0; block < EXT3_N_BLOCKS; block++)
2952		ei->i_data[block] = raw_inode->i_block[block];
2953	INIT_LIST_HEAD(&ei->i_orphan);
2954
2955	/*
2956	 * Set transaction id's of transactions that have to be committed
2957	 * to finish f[data]sync. We set them to currently running transaction
2958	 * as we cannot be sure that the inode or some of its metadata isn't
2959	 * part of the transaction - the inode could have been reclaimed and
2960	 * now it is reread from disk.
2961	 */
2962	if (journal) {
2963		tid_t tid;
2964
2965		spin_lock(&journal->j_state_lock);
2966		if (journal->j_running_transaction)
2967			transaction = journal->j_running_transaction;
2968		else
2969			transaction = journal->j_committing_transaction;
2970		if (transaction)
2971			tid = transaction->t_tid;
2972		else
2973			tid = journal->j_commit_sequence;
2974		spin_unlock(&journal->j_state_lock);
2975		atomic_set(&ei->i_sync_tid, tid);
2976		atomic_set(&ei->i_datasync_tid, tid);
2977	}
2978
2979	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2980	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2981		/*
2982		 * When mke2fs creates big inodes it does not zero out
2983		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2984		 * so ignore those first few inodes.
2985		 */
2986		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2987		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2988		    EXT3_INODE_SIZE(inode->i_sb)) {
2989			brelse (bh);
2990			ret = -EIO;
2991			goto bad_inode;
2992		}
2993		if (ei->i_extra_isize == 0) {
2994			/* The extra space is currently unused. Use it. */
2995			ei->i_extra_isize = sizeof(struct ext3_inode) -
2996					    EXT3_GOOD_OLD_INODE_SIZE;
2997		} else {
2998			__le32 *magic = (void *)raw_inode +
2999					EXT3_GOOD_OLD_INODE_SIZE +
3000					ei->i_extra_isize;
3001			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
3002				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
3003		}
3004	} else
3005		ei->i_extra_isize = 0;
3006
3007	if (S_ISREG(inode->i_mode)) {
3008		inode->i_op = &ext3_file_inode_operations;
3009		inode->i_fop = &ext3_file_operations;
3010		ext3_set_aops(inode);
3011	} else if (S_ISDIR(inode->i_mode)) {
3012		inode->i_op = &ext3_dir_inode_operations;
3013		inode->i_fop = &ext3_dir_operations;
3014	} else if (S_ISLNK(inode->i_mode)) {
3015		if (ext3_inode_is_fast_symlink(inode)) {
3016			inode->i_op = &ext3_fast_symlink_inode_operations;
3017			nd_terminate_link(ei->i_data, inode->i_size,
3018				sizeof(ei->i_data) - 1);
3019		} else {
3020			inode->i_op = &ext3_symlink_inode_operations;
3021			ext3_set_aops(inode);
3022		}
3023	} else {
3024		inode->i_op = &ext3_special_inode_operations;
3025		if (raw_inode->i_block[0])
3026			init_special_inode(inode, inode->i_mode,
3027			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3028		else
3029			init_special_inode(inode, inode->i_mode,
3030			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3031	}
3032	brelse (iloc.bh);
3033	ext3_set_inode_flags(inode);
3034	unlock_new_inode(inode);
3035	return inode;
3036
3037bad_inode:
3038	iget_failed(inode);
3039	return ERR_PTR(ret);
3040}
3041
3042/*
3043 * Post the struct inode info into an on-disk inode location in the
3044 * buffer-cache.  This gobbles the caller's reference to the
3045 * buffer_head in the inode location struct.
3046 *
3047 * The caller must have write access to iloc->bh.
3048 */
3049static int ext3_do_update_inode(handle_t *handle,
3050				struct inode *inode,
3051				struct ext3_iloc *iloc)
3052{
3053	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
3054	struct ext3_inode_info *ei = EXT3_I(inode);
3055	struct buffer_head *bh = iloc->bh;
3056	int err = 0, rc, block;
3057
3058again:
3059	/* we can't allow multiple procs in here at once, it's a bit racy */
3060	lock_buffer(bh);
3061
3062	/* For fields not tracked in the in-memory inode,
3063	 * initialise them to zero for new inodes. */
3064	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
3065		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
3066
3067	ext3_get_inode_flags(ei);
3068	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3069	if(!(test_opt(inode->i_sb, NO_UID32))) {
3070		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
3071		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
3072/*
3073 * Fix up interoperability with old kernels. Otherwise, old inodes get
3074 * re-used with the upper 16 bits of the uid/gid intact
3075 */
3076		if(!ei->i_dtime) {
3077			raw_inode->i_uid_high =
3078				cpu_to_le16(high_16_bits(inode->i_uid));
3079			raw_inode->i_gid_high =
3080				cpu_to_le16(high_16_bits(inode->i_gid));
3081		} else {
3082			raw_inode->i_uid_high = 0;
3083			raw_inode->i_gid_high = 0;
3084		}
3085	} else {
3086		raw_inode->i_uid_low =
3087			cpu_to_le16(fs_high2lowuid(inode->i_uid));
3088		raw_inode->i_gid_low =
3089			cpu_to_le16(fs_high2lowgid(inode->i_gid));
3090		raw_inode->i_uid_high = 0;
3091		raw_inode->i_gid_high = 0;
3092	}
3093	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
3094	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
3095	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
3096	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
3097	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
3098	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
3099	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
3100	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
3101#ifdef EXT3_FRAGMENTS
3102	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
3103	raw_inode->i_frag = ei->i_frag_no;
3104	raw_inode->i_fsize = ei->i_frag_size;
3105#endif
3106	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
3107	if (!S_ISREG(inode->i_mode)) {
3108		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
3109	} else {
3110		raw_inode->i_size_high =
3111			cpu_to_le32(ei->i_disksize >> 32);
3112		if (ei->i_disksize > 0x7fffffffULL) {
3113			struct super_block *sb = inode->i_sb;
3114			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
3115					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
3116			    EXT3_SB(sb)->s_es->s_rev_level ==
3117					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
3118			       /* If this is the first large file
3119				* created, add a flag to the superblock.
3120				*/
3121				unlock_buffer(bh);
3122				err = ext3_journal_get_write_access(handle,
3123						EXT3_SB(sb)->s_sbh);
3124				if (err)
3125					goto out_brelse;
3126
3127				ext3_update_dynamic_rev(sb);
3128				EXT3_SET_RO_COMPAT_FEATURE(sb,
3129					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
3130				handle->h_sync = 1;
3131				err = ext3_journal_dirty_metadata(handle,
3132						EXT3_SB(sb)->s_sbh);
3133				/* get our lock and start over */
3134				goto again;
3135			}
3136		}
3137	}
3138	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3139	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3140		if (old_valid_dev(inode->i_rdev)) {
3141			raw_inode->i_block[0] =
3142				cpu_to_le32(old_encode_dev(inode->i_rdev));
3143			raw_inode->i_block[1] = 0;
3144		} else {
3145			raw_inode->i_block[0] = 0;
3146			raw_inode->i_block[1] =
3147				cpu_to_le32(new_encode_dev(inode->i_rdev));
3148			raw_inode->i_block[2] = 0;
3149		}
3150	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
3151		raw_inode->i_block[block] = ei->i_data[block];
3152
3153	if (ei->i_extra_isize)
3154		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3155
3156	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
3157	unlock_buffer(bh);
3158	rc = ext3_journal_dirty_metadata(handle, bh);
3159	if (!err)
3160		err = rc;
3161	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
3162
3163	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
3164out_brelse:
3165	brelse (bh);
3166	ext3_std_error(inode->i_sb, err);
3167	return err;
3168}
3169
3170/*
3171 * ext3_write_inode()
3172 *
3173 * We are called from a few places:
3174 *
3175 * - Within generic_file_write() for O_SYNC files.
3176 *   Here, there will be no transaction running. We wait for any running
3177 *   transaction to commit.
3178 *
3179 * - Within sys_sync(), kupdate and such.
3180 *   We wait on commit, if told to.
3181 *
3182 * - Within prune_icache() (PF_MEMALLOC == true)
3183 *   Here we simply return.  We can't afford to block kswapd on the
3184 *   journal commit.
3185 *
3186 * In all cases it is actually safe for us to return without doing anything,
3187 * because the inode has been copied into a raw inode buffer in
3188 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3189 * knfsd.
3190 *
3191 * Note that we are absolutely dependent upon all inode dirtiers doing the
3192 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3193 * which we are interested.
3194 *
3195 * It would be a bug for them to not do this.  The code:
3196 *
3197 *	mark_inode_dirty(inode)
3198 *	stuff();
3199 *	inode->i_size = expr;
3200 *
3201 * is in error because a kswapd-driven write_inode() could occur while
3202 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3203 * will no longer be on the superblock's dirty inode list.
3204 */
3205int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
3206{
3207	if (current->flags & PF_MEMALLOC)
3208		return 0;
3209
3210	if (ext3_journal_current_handle()) {
3211		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3212		dump_stack();
3213		return -EIO;
3214	}
3215
3216	if (wbc->sync_mode != WB_SYNC_ALL)
3217		return 0;
3218
3219	return ext3_force_commit(inode->i_sb);
3220}
3221
3222/*
3223 * ext3_setattr()
3224 *
3225 * Called from notify_change.
3226 *
3227 * We want to trap VFS attempts to truncate the file as soon as
3228 * possible.  In particular, we want to make sure that when the VFS
3229 * shrinks i_size, we put the inode on the orphan list and modify
3230 * i_disksize immediately, so that during the subsequent flushing of
3231 * dirty pages and freeing of disk blocks, we can guarantee that any
3232 * commit will leave the blocks being flushed in an unused state on
3233 * disk.  (On recovery, the inode will get truncated and the blocks will
3234 * be freed, so we have a strong guarantee that no future commit will
3235 * leave these blocks visible to the user.)
3236 *
3237 * Called with inode->sem down.
3238 */
3239int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3240{
3241	struct inode *inode = dentry->d_inode;
3242	int error, rc = 0;
3243	const unsigned int ia_valid = attr->ia_valid;
3244
3245	error = inode_change_ok(inode, attr);
3246	if (error)
3247		return error;
3248
3249	if (is_quota_modification(inode, attr))
3250		dquot_initialize(inode);
3251	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3252		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3253		handle_t *handle;
3254
3255		/* (user+group)*(old+new) structure, inode write (sb,
3256		 * inode block, ? - but truncate inode update has it) */
3257		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
3258					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
3259		if (IS_ERR(handle)) {
3260			error = PTR_ERR(handle);
3261			goto err_out;
3262		}
3263		error = dquot_transfer(inode, attr);
3264		if (error) {
3265			ext3_journal_stop(handle);
3266			return error;
3267		}
3268		/* Update corresponding info in inode so that everything is in
3269		 * one transaction */
3270		if (attr->ia_valid & ATTR_UID)
3271			inode->i_uid = attr->ia_uid;
3272		if (attr->ia_valid & ATTR_GID)
3273			inode->i_gid = attr->ia_gid;
3274		error = ext3_mark_inode_dirty(handle, inode);
3275		ext3_journal_stop(handle);
3276	}
3277
3278	if (attr->ia_valid & ATTR_SIZE)
3279		inode_dio_wait(inode);
3280
3281	if (S_ISREG(inode->i_mode) &&
3282	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3283		handle_t *handle;
3284
3285		handle = ext3_journal_start(inode, 3);
3286		if (IS_ERR(handle)) {
3287			error = PTR_ERR(handle);
3288			goto err_out;
3289		}
3290
3291		error = ext3_orphan_add(handle, inode);
3292		if (error) {
3293			ext3_journal_stop(handle);
3294			goto err_out;
3295		}
3296		EXT3_I(inode)->i_disksize = attr->ia_size;
3297		error = ext3_mark_inode_dirty(handle, inode);
3298		ext3_journal_stop(handle);
3299		if (error) {
3300			/* Some hard fs error must have happened. Bail out. */
3301			ext3_orphan_del(NULL, inode);
3302			goto err_out;
3303		}
3304		rc = ext3_block_truncate_page(inode, attr->ia_size);
3305		if (rc) {
3306			/* Cleanup orphan list and exit */
3307			handle = ext3_journal_start(inode, 3);
3308			if (IS_ERR(handle)) {
3309				ext3_orphan_del(NULL, inode);
3310				goto err_out;
3311			}
3312			ext3_orphan_del(handle, inode);
3313			ext3_journal_stop(handle);
3314			goto err_out;
3315		}
3316	}
3317
3318	if ((attr->ia_valid & ATTR_SIZE) &&
3319	    attr->ia_size != i_size_read(inode)) {
3320		truncate_setsize(inode, attr->ia_size);
3321		ext3_truncate(inode);
3322	}
3323
3324	setattr_copy(inode, attr);
3325	mark_inode_dirty(inode);
3326
3327	if (ia_valid & ATTR_MODE)
3328		rc = ext3_acl_chmod(inode);
3329
3330err_out:
3331	ext3_std_error(inode->i_sb, error);
3332	if (!error)
3333		error = rc;
3334	return error;
3335}
3336
3337
3338/*
3339 * How many blocks doth make a writepage()?
3340 *
3341 * With N blocks per page, it may be:
3342 * N data blocks
3343 * 2 indirect blocks
3344 * 2 dindirect blocks
3345 * 1 tindirect block
3346 * N+5 bitmap blocks (from the above)
3347 * N+5 group descriptor summary blocks
3348 * 1 inode block
3349 * 1 superblock.
3350 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3351 *
3352 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3353 *
3354 * With ordered or writeback data it's the same, less the N data blocks.
3355 *
3356 * If the inode's direct blocks can hold an integral number of pages then a
3357 * page cannot straddle two indirect blocks, and we can only touch one indirect
3358 * and dindirect block, and the "5" above becomes "3".
3359 *
3360 * This still overestimates under most circumstances.  If we were to pass the
3361 * start and end offsets in here as well we could do block_to_path() on each
3362 * block and work out the exact number of indirects which are touched.  Pah.
3363 */
3364
3365static int ext3_writepage_trans_blocks(struct inode *inode)
3366{
3367	int bpp = ext3_journal_blocks_per_page(inode);
3368	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3369	int ret;
3370
3371	if (ext3_should_journal_data(inode))
3372		ret = 3 * (bpp + indirects) + 2;
3373	else
3374		ret = 2 * (bpp + indirects) + indirects + 2;
3375
3376#ifdef CONFIG_QUOTA
3377	/* We know that the structure was already allocated during dquot_initialize() so
3378	 * we will be updating only the data blocks + inodes */
3379	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3380#endif
3381
3382	return ret;
3383}
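
To make the estimate above concrete, here is a minimal user-space sketch of the same arithmetic (the 1KB block size, 4KB page size and the dropped quota term are assumptions for illustration, not part of the kernel code):

#include <stdio.h>

int main(void)
{
	int page_size = 4096, block_size = 1024;	/* assumed geometry */
	int bpp = page_size / block_size;		/* N blocks per page */
	int ndir_blocks = 12;				/* mirrors EXT3_NDIR_BLOCKS */
	int indirects = (ndir_blocks % bpp) ? 5 : 3;
	int journalled = 3 * (bpp + indirects) + 2;
	int ordered    = 2 * (bpp + indirects) + indirects + 2;

	printf("bpp=%d indirects=%d data=journal:%d ordered/writeback:%d\n",
	       bpp, indirects, journalled, ordered);
	return 0;
}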
3384
3385/*
3386 * The caller must have previously called ext3_reserve_inode_write().
3387 * Given this, we know that the caller already has write access to iloc->bh.
3388 */
3389int ext3_mark_iloc_dirty(handle_t *handle,
3390		struct inode *inode, struct ext3_iloc *iloc)
3391{
3392	int err = 0;
3393
3394	/* the do_update_inode consumes one bh->b_count */
3395	get_bh(iloc->bh);
3396
3397	/* ext3_do_update_inode() does journal_dirty_metadata */
3398	err = ext3_do_update_inode(handle, inode, iloc);
3399	put_bh(iloc->bh);
3400	return err;
3401}
3402
3403/*
3404 * On success, we end up with an outstanding reference count against
3405 * iloc->bh.  This _must_ be cleaned up later.
3406 */
3407
3408int
3409ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3410			 struct ext3_iloc *iloc)
3411{
3412	int err = 0;
3413	if (handle) {
3414		err = ext3_get_inode_loc(inode, iloc);
3415		if (!err) {
3416			BUFFER_TRACE(iloc->bh, "get_write_access");
3417			err = ext3_journal_get_write_access(handle, iloc->bh);
3418			if (err) {
3419				brelse(iloc->bh);
3420				iloc->bh = NULL;
3421			}
3422		}
3423	}
3424	ext3_std_error(inode->i_sb, err);
3425	return err;
3426}
3427
3428/*
3429 * What we do here is to mark the in-core inode as clean with respect to inode
3430 * dirtiness (it may still be data-dirty).
3431 * This means that the in-core inode may be reaped by prune_icache
3432 * without having to perform any I/O.  This is a very good thing,
3433 * because *any* task may call prune_icache - even ones which
3434 * have a transaction open against a different journal.
3435 *
3436 * Is this cheating?  Not really.  Sure, we haven't written the
3437 * inode out, but prune_icache isn't a user-visible syncing function.
3438 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3439 * we start and wait on commits.
3440 *
3441 * Is this efficient/effective?  Well, we're being nice to the system
3442 * by cleaning up our inodes proactively so they can be reaped
3443 * without I/O.  But we are potentially leaving up to five seconds'
3444 * worth of inodes floating about which prune_icache wants us to
3445 * write out.  One way to fix that would be to get prune_icache()
3446 * to do a write_super() to free up some memory.  It has the desired
3447 * effect.
3448 */
3449int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3450{
3451	struct ext3_iloc iloc;
3452	int err;
3453
3454	might_sleep();
3455	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
3456	err = ext3_reserve_inode_write(handle, inode, &iloc);
3457	if (!err)
3458		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3459	return err;
3460}
3461
3462/*
3463 * ext3_dirty_inode() is called from __mark_inode_dirty()
3464 *
3465 * We're really interested in the case where a file is being extended.
3466 * i_size has been changed by generic_commit_write() and we thus need
3467 * to include the updated inode in the current transaction.
3468 *
3469 * Also, dquot_alloc_space() will always dirty the inode when blocks
3470 * are allocated to the file.
3471 *
3472 * If the inode is marked synchronous, we don't honour that here - doing
3473 * so would cause a commit on atime updates, which we don't bother doing.
3474 * We handle synchronous inodes at the highest possible level.
3475 */
3476void ext3_dirty_inode(struct inode *inode, int flags)
3477{
3478	handle_t *current_handle = ext3_journal_current_handle();
3479	handle_t *handle;
3480
3481	handle = ext3_journal_start(inode, 2);
3482	if (IS_ERR(handle))
3483		goto out;
3484	if (current_handle &&
3485		current_handle->h_transaction != handle->h_transaction) {
3486		/* This task has a transaction open against a different fs */
3487		printk(KERN_EMERG "%s: transactions do not match!\n",
3488		       __func__);
3489	} else {
3490		jbd_debug(5, "marking dirty.  outer handle=%p\n",
3491				current_handle);
3492		ext3_mark_inode_dirty(handle, inode);
3493	}
3494	ext3_journal_stop(handle);
3495out:
3496	return;
3497}
3498
3499#if 0
3500/*
3501 * Bind an inode's backing buffer_head into this transaction, to prevent
3502 * it from being flushed to disk early.  Unlike
3503 * ext3_reserve_inode_write, this leaves behind no bh reference and
3504 * returns no iloc structure, so the caller needs to repeat the iloc
3505 * lookup to mark the inode dirty later.
3506 */
3507static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3508{
3509	struct ext3_iloc iloc;
3510
3511	int err = 0;
3512	if (handle) {
3513		err = ext3_get_inode_loc(inode, &iloc);
3514		if (!err) {
3515			BUFFER_TRACE(iloc.bh, "get_write_access");
3516			err = journal_get_write_access(handle, iloc.bh);
3517			if (!err)
3518				err = ext3_journal_dirty_metadata(handle,
3519								  iloc.bh);
3520			brelse(iloc.bh);
3521		}
3522	}
3523	ext3_std_error(inode->i_sb, err);
3524	return err;
3525}
3526#endif
3527
3528int ext3_change_inode_journal_flag(struct inode *inode, int val)
3529{
3530	journal_t *journal;
3531	handle_t *handle;
3532	int err;
3533
3534	/*
3535	 * We have to be very careful here: changing a data block's
3536	 * journaling status dynamically is dangerous.  If we write a
3537	 * data block to the journal, change the status and then delete
3538	 * that block, we risk forgetting to revoke the old log record
3539	 * from the journal and so a subsequent replay can corrupt data.
3540	 * So, first we make sure that the journal is empty and that
3541	 * nobody is changing anything.
3542	 */
3543
3544	journal = EXT3_JOURNAL(inode);
3545	if (is_journal_aborted(journal))
3546		return -EROFS;
3547
3548	journal_lock_updates(journal);
3549	journal_flush(journal);
3550
3551	/*
3552	 * OK, there are no updates running now, and all cached data is
3553	 * synced to disk.  We are now in a completely consistent state
3554	 * which doesn't have anything in the journal, and we know that
3555	 * no filesystem updates are running, so it is safe to modify
3556	 * the inode's in-core data-journaling state flag now.
3557	 */
3558
3559	if (val)
3560		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3561	else
3562		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3563	ext3_set_aops(inode);
3564
3565	journal_unlock_updates(journal);
3566
3567	/* Finally we can mark the inode as dirty. */
3568
3569	handle = ext3_journal_start(inode, 1);
3570	if (IS_ERR(handle))
3571		return PTR_ERR(handle);
3572
3573	err = ext3_mark_inode_dirty(handle, inode);
3574	handle->h_sync = 1;
3575	ext3_journal_stop(handle);
3576	ext3_std_error(inode->i_sb, err);
3577
3578	return err;
3579}
v3.15
   1/*
   2 *  linux/fs/ext3/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  Goal-directed block allocation by Stephen Tweedie
  16 *	(sct@redhat.com), 1993, 1998
  17 *  Big-endian to little-endian byte-swapping/bitmaps by
  18 *        David S. Miller (davem@caip.rutgers.edu), 1995
  19 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  20 *	(jj@sunsite.ms.mff.cuni.cz)
  21 *
  22 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  23 */
   24
   25#include <linux/highuid.h>
   26#include <linux/quotaops.h>
   27#include <linux/writeback.h>
   28#include <linux/mpage.h>
   29#include <linux/namei.h>
  30#include <linux/aio.h>
  31#include "ext3.h"
  32#include "xattr.h"
  33#include "acl.h"
  34
  35static int ext3_writepage_trans_blocks(struct inode *inode);
  36static int ext3_block_truncate_page(struct inode *inode, loff_t from);
  37
  38/*
  39 * Test whether an inode is a fast symlink.
  40 */
  41static int ext3_inode_is_fast_symlink(struct inode *inode)
  42{
  43	int ea_blocks = EXT3_I(inode)->i_file_acl ?
  44		(inode->i_sb->s_blocksize >> 9) : 0;
  45
  46	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
  47}
  48
  49/*
  50 * The ext3 forget function must perform a revoke if we are freeing data
  51 * which has been journaled.  Metadata (eg. indirect blocks) must be
  52 * revoked in all cases.
  53 *
  54 * "bh" may be NULL: a metadata block may have been freed from memory
  55 * but there may still be a record of it in the journal, and that record
  56 * still needs to be revoked.
  57 */
  58int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  59			struct buffer_head *bh, ext3_fsblk_t blocknr)
  60{
  61	int err;
  62
  63	might_sleep();
  64
  65	trace_ext3_forget(inode, is_metadata, blocknr);
  66	BUFFER_TRACE(bh, "enter");
  67
  68	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
  69		  "data mode %lx\n",
  70		  bh, is_metadata, inode->i_mode,
  71		  test_opt(inode->i_sb, DATA_FLAGS));
  72
  73	/* Never use the revoke function if we are doing full data
  74	 * journaling: there is no need to, and a V1 superblock won't
  75	 * support it.  Otherwise, only skip the revoke on un-journaled
  76	 * data blocks. */
  77
  78	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
  79	    (!is_metadata && !ext3_should_journal_data(inode))) {
  80		if (bh) {
  81			BUFFER_TRACE(bh, "call journal_forget");
  82			return ext3_journal_forget(handle, bh);
  83		}
  84		return 0;
  85	}
  86
  87	/*
  88	 * data!=journal && (is_metadata || should_journal_data(inode))
  89	 */
  90	BUFFER_TRACE(bh, "call ext3_journal_revoke");
  91	err = ext3_journal_revoke(handle, blocknr, bh);
  92	if (err)
  93		ext3_abort(inode->i_sb, __func__,
  94			   "error %d when attempting revoke", err);
  95	BUFFER_TRACE(bh, "exit");
  96	return err;
  97}
  98
  99/*
 100 * Work out how many blocks we need to proceed with the next chunk of a
 101 * truncate transaction.
 102 */
 103static unsigned long blocks_for_truncate(struct inode *inode)
 104{
 105	unsigned long needed;
 106
 107	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
 108
 109	/* Give ourselves just enough room to cope with inodes in which
 110	 * i_blocks is corrupt: we've seen disk corruptions in the past
 111	 * which resulted in random data in an inode which looked enough
 112	 * like a regular file for ext3 to try to delete it.  Things
 113	 * will go a bit crazy if that happens, but at least we should
 114	 * try not to panic the whole kernel. */
 115	if (needed < 2)
 116		needed = 2;
 117
 118	/* But we need to bound the transaction so we don't overflow the
 119	 * journal. */
 120	if (needed > EXT3_MAX_TRANS_DATA)
 121		needed = EXT3_MAX_TRANS_DATA;
 122
 123	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 124}
 125
 126/*
 127 * Truncate transactions can be complex and absolutely huge.  So we need to
 128 * be able to restart the transaction at a convenient checkpoint to make
 129 * sure we don't overflow the journal.
 130 *
 131 * start_transaction gets us a new handle for a truncate transaction,
 132 * and extend_transaction tries to extend the existing one a bit.  If
 133 * extend fails, we need to propagate the failure up and restart the
 134 * transaction in the top-level truncate loop. --sct
 135 */
 136static handle_t *start_transaction(struct inode *inode)
 137{
 138	handle_t *result;
 139
 140	result = ext3_journal_start(inode, blocks_for_truncate(inode));
 141	if (!IS_ERR(result))
 142		return result;
 143
 144	ext3_std_error(inode->i_sb, PTR_ERR(result));
 145	return result;
 146}
 147
 148/*
 149 * Try to extend this transaction for the purposes of truncation.
 150 *
 151 * Returns 0 if we managed to create more room.  If we can't create more
 152 * room, and the transaction must be restarted we return 1.
 153 */
 154static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 155{
 156	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
 157		return 0;
 158	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
 159		return 0;
 160	return 1;
 161}
 162
 163/*
 164 * Restart the transaction associated with *handle.  This does a commit,
 165 * so before we call here everything must be consistently dirtied against
 166 * this transaction.
 167 */
 168static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
 169{
 170	int ret;
 171
 172	jbd_debug(2, "restarting handle %p\n", handle);
 173	/*
 174	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle().
 175	 * At this moment, get_block can be called only for blocks inside
 176	 * i_size since page cache has been already dropped and writes are
 177	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
 178	 */
 179	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
 180	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
 181	mutex_lock(&EXT3_I(inode)->truncate_mutex);
 182	return ret;
 183}
 184
 185/*
 186 * Called at inode eviction from icache
 187 */
 188void ext3_evict_inode (struct inode *inode)
 189{
 190	struct ext3_inode_info *ei = EXT3_I(inode);
 191	struct ext3_block_alloc_info *rsv;
 192	handle_t *handle;
 193	int want_delete = 0;
 194
 195	trace_ext3_evict_inode(inode);
 196	if (!inode->i_nlink && !is_bad_inode(inode)) {
 197		dquot_initialize(inode);
 198		want_delete = 1;
 199	}
 200
 201	/*
 202	 * When journalling data, dirty buffers are tracked only in the journal.
 203	 * So although mm thinks everything is clean and ready for reaping, the
 204	 * inode might still have some pages to write in the running
 205	 * transaction or waiting to be checkpointed. Thus calling
 206	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
 207	 * these buffers can cause data loss. Also even if we did not discard
 208	 * these buffers, we would have no way to find them after the inode
 209	 * is reaped and thus user could see stale data if he tries to read
 210	 * them before the transaction is checkpointed. So be careful and
 211	 * force everything to disk here... We use ei->i_datasync_tid to
 212	 * store the newest transaction containing inode's data.
 213	 *
 214	 * Note that directories do not have this problem because they don't
 215	 * use page cache.
 216	 *
 217	 * The s_journal check handles the case when ext3_get_journal() fails
 218	 * and puts the journal inode.
 219	 */
 220	if (inode->i_nlink && ext3_should_journal_data(inode) &&
 221	    EXT3_SB(inode->i_sb)->s_journal &&
 222	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
 223	    inode->i_ino != EXT3_JOURNAL_INO) {
 224		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
 225		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
 226
 227		log_start_commit(journal, commit_tid);
 228		log_wait_commit(journal, commit_tid);
 229		filemap_write_and_wait(&inode->i_data);
 230	}
 231	truncate_inode_pages_final(&inode->i_data);
 232
 233	ext3_discard_reservation(inode);
 234	rsv = ei->i_block_alloc_info;
 235	ei->i_block_alloc_info = NULL;
 236	if (unlikely(rsv))
 237		kfree(rsv);
 238
 239	if (!want_delete)
 240		goto no_delete;
 241
 242	handle = start_transaction(inode);
 243	if (IS_ERR(handle)) {
 244		/*
 245		 * If we're going to skip the normal cleanup, we still need to
 246		 * make sure that the in-core orphan linked list is properly
 247		 * cleaned up.
 248		 */
 249		ext3_orphan_del(NULL, inode);
 250		goto no_delete;
 251	}
 252
 253	if (IS_SYNC(inode))
 254		handle->h_sync = 1;
 255	inode->i_size = 0;
 256	if (inode->i_blocks)
 257		ext3_truncate(inode);
 258	/*
 259	 * Kill off the orphan record created when the inode lost the last
 260	 * link.  Note that ext3_orphan_del() has to be able to cope with the
 261	 * deletion of a non-existent orphan - ext3_truncate() could
 262	 * have removed the record.
 263	 */
 264	ext3_orphan_del(handle, inode);
 265	ei->i_dtime = get_seconds();
 266
 267	/*
 268	 * One subtle ordering requirement: if anything has gone wrong
 269	 * (transaction abort, IO errors, whatever), then we can still
 270	 * do these next steps (the fs will already have been marked as
 271	 * having errors), but we can't free the inode if the mark_dirty
 272	 * fails.
 273	 */
 274	if (ext3_mark_inode_dirty(handle, inode)) {
 275		/* If that failed, just dquot_drop() and be done with that */
 276		dquot_drop(inode);
 277		clear_inode(inode);
 278	} else {
 279		ext3_xattr_delete_inode(handle, inode);
 280		dquot_free_inode(inode);
 281		dquot_drop(inode);
 282		clear_inode(inode);
 283		ext3_free_inode(handle, inode);
 284	}
 285	ext3_journal_stop(handle);
 286	return;
 287no_delete:
 288	clear_inode(inode);
 289	dquot_drop(inode);
 290}
 291
 292typedef struct {
 293	__le32	*p;
 294	__le32	key;
 295	struct buffer_head *bh;
 296} Indirect;
 297
 298static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
 299{
 300	p->key = *(p->p = v);
 301	p->bh = bh;
 302}
 303
 304static int verify_chain(Indirect *from, Indirect *to)
 305{
 306	while (from <= to && from->key == *from->p)
 307		from++;
 308	return (from > to);
 309}
 310
 311/**
 312 *	ext3_block_to_path - parse the block number into array of offsets
 313 *	@inode: inode in question (we are only interested in its superblock)
 314 *	@i_block: block number to be parsed
 315 *	@offsets: array to store the offsets in
 316 *      @boundary: set this non-zero if the referred-to block is likely to be
 317 *             followed (on disk) by an indirect block.
 318 *
 319 *	To store the locations of file's data ext3 uses a data structure common
 320 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 321 *	data blocks at leaves and indirect blocks in intermediate nodes.
 322 *	This function translates the block number into path in that tree -
 323 *	return value is the path length and @offsets[n] is the offset of
 324 *	pointer to (n+1)th node in the nth one. If @block is out of range
 325 *	(negative or too large) a warning is printed and zero is returned.
 326 *
 327 *	Note: function doesn't find node addresses, so no IO is needed. All
 328 *	we need to know is the capacity of indirect blocks (taken from the
 329 *	inode->i_sb).
 330 */
 331
 332/*
 333 * Portability note: the last comparison (check that we fit into triple
 334 * indirect block) is spelled differently, because otherwise on an
 335 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 336 * if our filesystem had 8Kb blocks. We might use long long, but that would
 337 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 338 * i_block would have to be negative in the very beginning, so we would not
 339 * get there at all.
 340 */
 341
 342static int ext3_block_to_path(struct inode *inode,
 343			long i_block, int offsets[4], int *boundary)
 344{
 345	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
 346	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
 347	const long direct_blocks = EXT3_NDIR_BLOCKS,
 348		indirect_blocks = ptrs,
 349		double_blocks = (1 << (ptrs_bits * 2));
 350	int n = 0;
 351	int final = 0;
 352
 353	if (i_block < 0) {
 354		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
 355	} else if (i_block < direct_blocks) {
 356		offsets[n++] = i_block;
 357		final = direct_blocks;
 358	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
 359		offsets[n++] = EXT3_IND_BLOCK;
 360		offsets[n++] = i_block;
 361		final = ptrs;
 362	} else if ((i_block -= indirect_blocks) < double_blocks) {
 363		offsets[n++] = EXT3_DIND_BLOCK;
 364		offsets[n++] = i_block >> ptrs_bits;
 365		offsets[n++] = i_block & (ptrs - 1);
 366		final = ptrs;
 367	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
 368		offsets[n++] = EXT3_TIND_BLOCK;
 369		offsets[n++] = i_block >> (ptrs_bits * 2);
 370		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
 371		offsets[n++] = i_block & (ptrs - 1);
 372		final = ptrs;
 373	} else {
 374		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
 375	}
 376	if (boundary)
 377		*boundary = final - 1 - (i_block & (ptrs - 1));
 378	return n;
 379}
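
For illustration, the following is a minimal user-space sketch of the same decomposition, assuming 4KB blocks (1024 pointers per indirect block) and the usual 12 direct slots; the named constants mirror EXT3_IND_BLOCK and friends, and the out-of-range warning is omitted. This is not the kernel function, only the arithmetic it performs:

#include <stdio.h>

/* Offsets of the indirect pointer slots in the inode's i_data[] array;
 * values mirror EXT3_IND_BLOCK, EXT3_DIND_BLOCK and EXT3_TIND_BLOCK. */
enum { NDIR = 12, IND = 12, DIND = 13, TIND = 14 };

static int block_to_path(long b, int offsets[4])
{
	const int ptrs = 1024, ptrs_bits = 10;	/* 4KB blocks assumed */
	int n = 0;

	if (b < NDIR) {
		offsets[n++] = b;
	} else if ((b -= NDIR) < ptrs) {
		offsets[n++] = IND;
		offsets[n++] = b;
	} else if ((b -= ptrs) < (1L << (ptrs_bits * 2))) {
		offsets[n++] = DIND;
		offsets[n++] = b >> ptrs_bits;
		offsets[n++] = b & (ptrs - 1);
	} else {			/* range check omitted in this sketch */
		b -= 1L << (ptrs_bits * 2);
		offsets[n++] = TIND;
		offsets[n++] = b >> (ptrs_bits * 2);
		offsets[n++] = (b >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = b & (ptrs - 1);
	}
	return n;				/* depth of the path */
}

int main(void)
{
	int off[4], i, n = block_to_path(500, off);

	for (i = 0; i < n; i++)
		printf("%d ", off[i]);		/* prints "12 488 " */
	printf("\n");
	return 0;
}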
 380
 381/**
 382 *	ext3_get_branch - read the chain of indirect blocks leading to data
 383 *	@inode: inode in question
 384 *	@depth: depth of the chain (1 - direct pointer, etc.)
 385 *	@offsets: offsets of pointers in inode/indirect blocks
 386 *	@chain: place to store the result
 387 *	@err: here we store the error value
 388 *
 389 *	Function fills the array of triples <key, p, bh> and returns %NULL
 390 *	if everything went OK or the pointer to the last filled triple
 391 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 392 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 393 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 394 *	number (it points into struct inode for i==0 and into the bh->b_data
 395 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 396 *	block for i>0 and NULL for i==0. In other words, it holds the block
 397 *	numbers of the chain, addresses they were taken from (and where we can
 398 *	verify that chain did not change) and buffer_heads hosting these
 399 *	numbers.
 400 *
 401 *	Function stops when it stumbles upon zero pointer (absent block)
 402 *		(pointer to last triple returned, *@err == 0)
 403 *	or when it gets an IO error reading an indirect block
 404 *		(ditto, *@err == -EIO)
 405 *	or when it notices that chain had been changed while it was reading
 406 *		(ditto, *@err == -EAGAIN)
 407 *	or when it reads all @depth-1 indirect blocks successfully and finds
 408 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 409 */
 410static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
 411				 Indirect chain[4], int *err)
 412{
 413	struct super_block *sb = inode->i_sb;
 414	Indirect *p = chain;
 415	struct buffer_head *bh;
 416
 417	*err = 0;
 418	/* i_data is not going away, no lock needed */
 419	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
 420	if (!p->key)
 421		goto no_block;
 422	while (--depth) {
 423		bh = sb_bread(sb, le32_to_cpu(p->key));
 424		if (!bh)
 425			goto failure;
 426		/* Reader: pointers */
 427		if (!verify_chain(chain, p))
 428			goto changed;
 429		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
 430		/* Reader: end */
 431		if (!p->key)
 432			goto no_block;
 433	}
 434	return NULL;
 435
 436changed:
 437	brelse(bh);
 438	*err = -EAGAIN;
 439	goto no_block;
 440failure:
 441	*err = -EIO;
 442no_block:
 443	return p;
 444}
 445
 446/**
 447 *	ext3_find_near - find a place for allocation with sufficient locality
 448 *	@inode: owner
 449 *	@ind: descriptor of indirect block.
 450 *
 451 *	This function returns the preferred place for block allocation.
 452 *	It is used when heuristic for sequential allocation fails.
 453 *	Rules are:
 454 *	  + if there is a block to the left of our position - allocate near it.
 455 *	  + if pointer will live in indirect block - allocate near that block.
 456 *	  + if pointer will live in inode - allocate in the same
 457 *	    cylinder group.
 458 *
 459 * In the latter case we colour the starting block by the caller's PID to
 460 * prevent it from clashing with concurrent allocations for a different inode
 461 * in the same block group.   The PID is used here so that functionally related
 462 * files will be close-by on-disk.
 463 *
 464 *	Caller must make sure that @ind is valid and will stay that way.
 465 */
 466static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
 467{
 468	struct ext3_inode_info *ei = EXT3_I(inode);
 469	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
 470	__le32 *p;
 471	ext3_fsblk_t bg_start;
 472	ext3_grpblk_t colour;
 473
 474	/* Try to find previous block */
 475	for (p = ind->p - 1; p >= start; p--) {
 476		if (*p)
 477			return le32_to_cpu(*p);
 478	}
 479
 480	/* No such thing, so let's try location of indirect block */
 481	if (ind->bh)
 482		return ind->bh->b_blocknr;
 483
 484	/*
 485	 * It is going to be referred to from the inode itself? OK, just put it
 486	 * into the same cylinder group then.
 487	 */
 488	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
 489	colour = (current->pid % 16) *
 490			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
 491	return bg_start + colour;
 492}
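
As a small numeric illustration of the PID colouring described above (the 32768 blocks per group and the group start block are assumed values; this is not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long blocks_per_group = 32768;	/* assumed: 4KB blocks */
	unsigned long bg_start = 163840;	/* assumed first block of the group */
	int pid;

	/* Different PIDs land 1/16th of a block group apart, so concurrent
	 * writers tend not to fight over the same starting area. */
	for (pid = 1000; pid < 1004; pid++) {
		unsigned long colour = (pid % 16) * (blocks_per_group / 16);
		printf("pid %d -> goal block %lu\n", pid, bg_start + colour);
	}
	return 0;
}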
 493
 494/**
 495 *	ext3_find_goal - find a preferred place for allocation.
 496 *	@inode: owner
 497 *	@block:  block we want
 498 *	@partial: pointer to the last triple within a chain
 499 *
 500 *	Normally this function finds the preferred place for block allocation
 501 *	and returns it.
 502 */
 503
 504static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
 505				   Indirect *partial)
 506{
 507	struct ext3_block_alloc_info *block_i;
 508
 509	block_i =  EXT3_I(inode)->i_block_alloc_info;
 510
 511	/*
 512	 * try the heuristic for sequential allocation,
 513	 * failing that at least try to get decent locality.
 514	 */
 515	if (block_i && (block == block_i->last_alloc_logical_block + 1)
 516		&& (block_i->last_alloc_physical_block != 0)) {
 517		return block_i->last_alloc_physical_block + 1;
 518	}
 519
 520	return ext3_find_near(inode, partial);
 521}
 522
 523/**
 524 *	ext3_blks_to_allocate - Look up the block map and count the number
 525 *	of direct blocks that need to be allocated for the given branch.
 526 *
 527 *	@branch: chain of indirect blocks
 528 *	@k: number of blocks needed for indirect blocks
 529 *	@blks: number of data blocks to be mapped.
 530 *	@blocks_to_boundary:  the offset in the indirect block
 531 *
 532 *	return the total number of blocks to be allocated, including the
 533 *	direct and indirect blocks.
 534 */
 535static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
 536		int blocks_to_boundary)
 537{
 538	unsigned long count = 0;
 539
 540	/*
 541	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
 542	 * so it is clear that no blocks on that path have been allocated either.
 543	 */
 544	if (k > 0) {
 545		/* right now we don't handle cross boundary allocation */
 546		if (blks < blocks_to_boundary + 1)
 547			count += blks;
 548		else
 549			count += blocks_to_boundary + 1;
 550		return count;
 551	}
 552
 553	count++;
 554	while (count < blks && count <= blocks_to_boundary &&
 555		le32_to_cpu(*(branch[0].p + count)) == 0) {
 556		count++;
 557	}
 558	return count;
 559}
 560
 561/**
 562 *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
 563 *	@handle: handle for this transaction
 564 *	@inode: owner
 565 *	@goal: preferred place for allocation
 566 *	@indirect_blks: the number of blocks needed to allocate for indirect
 567 *			blocks
 568 *	@blks:	number of blocks needed to allocate for direct blocks
 569 *	@new_blocks: on return it will store the new block numbers for
 570 *	the indirect blocks(if needed) and the first direct block,
 571 *	@err: here we store the error value
 572 *
 573 *	return the number of direct blocks allocated
 574 */
 575static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
 576			ext3_fsblk_t goal, int indirect_blks, int blks,
 577			ext3_fsblk_t new_blocks[4], int *err)
 578{
 579	int target, i;
 580	unsigned long count = 0;
 581	int index = 0;
 582	ext3_fsblk_t current_block = 0;
 583	int ret = 0;
 584
 585	/*
 586	 * Here we try to allocate the requested multiple blocks at once,
 587	 * on a best-effort basis.
 588	 * To build a branch, we should allocate blocks for
 589	 * the indirect blocks (if not allocated yet), and at least
 590	 * the first direct block of this branch.  That's the
 591	 * minimum number of blocks we need to allocate (required).
 592	 */
 593	target = blks + indirect_blks;
 594
 595	while (1) {
 596		count = target;
 597		/* allocating blocks for indirect blocks and direct blocks */
 598		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
 599		if (*err)
 600			goto failed_out;
 601
 602		target -= count;
 603		/* allocate blocks for indirect blocks */
 604		while (index < indirect_blks && count) {
 605			new_blocks[index++] = current_block++;
 606			count--;
 607		}
 608
 609		if (count > 0)
 610			break;
 611	}
 612
 613	/* save the new block number for the first direct block */
 614	new_blocks[index] = current_block;
 615
 616	/* total number of blocks allocated for direct blocks */
 617	ret = count;
 618	*err = 0;
 619	return ret;
 620failed_out:
 621	for (i = 0; i <index; i++)
 622		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 623	return ret;
 624}
 625
 626/**
 627 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 628 *	@handle: handle for this transaction
 629 *	@inode: owner
 630 *	@indirect_blks: number of allocated indirect blocks
 631 *	@blks: number of allocated direct blocks
 632 *	@goal: preferred place for allocation
 633 *	@offsets: offsets (in the blocks) to store the pointers to next.
 634 *	@branch: place to store the chain in.
 635 *
 636 *	This function allocates blocks, zeroes out all but the last one,
 637 *	links them into a chain and (if we are synchronous) writes them to disk.
 638 *	In other words, it prepares a branch that can be spliced onto the
 639 *	inode. It stores the information about that chain in the branch[], in
 640 *	the same format as ext3_get_branch() would do. We are calling it after
 641 *	we had read the existing part of chain and partial points to the last
 642 *	triple of that (one with zero ->key). Upon the exit we have the same
 643 *	picture as after the successful ext3_get_block(), except that in one
 644 *	place chain is disconnected - *branch->p is still zero (we did not
 645 *	set the last link), but branch->key contains the number that should
 646 *	be placed into *branch->p to fill that gap.
 647 *
 648 *	If allocation fails we free all blocks we've allocated (and forget
 649 *	their buffer_heads) and return the error value from the failed
 650 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 651 *	as described above and return 0.
 652 */
 653static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 654			int indirect_blks, int *blks, ext3_fsblk_t goal,
 655			int *offsets, Indirect *branch)
 656{
 657	int blocksize = inode->i_sb->s_blocksize;
 658	int i, n = 0;
 659	int err = 0;
 660	struct buffer_head *bh;
 661	int num;
 662	ext3_fsblk_t new_blocks[4];
 663	ext3_fsblk_t current_block;
 664
 665	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
 666				*blks, new_blocks, &err);
 667	if (err)
 668		return err;
 669
 670	branch[0].key = cpu_to_le32(new_blocks[0]);
 671	/*
 672	 * metadata blocks and data blocks are allocated.
 673	 */
 674	for (n = 1; n <= indirect_blks;  n++) {
 675		/*
 676		 * Get buffer_head for parent block, zero it out
 677		 * and set the pointer to new one, then send
 678		 * parent to disk.
 679		 */
 680		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
 681		if (unlikely(!bh)) {
 682			err = -ENOMEM;
 683			goto failed;
 684		}
 685		branch[n].bh = bh;
 686		lock_buffer(bh);
 687		BUFFER_TRACE(bh, "call get_create_access");
 688		err = ext3_journal_get_create_access(handle, bh);
 689		if (err) {
 690			unlock_buffer(bh);
 691			brelse(bh);
 692			goto failed;
 693		}
 694
 695		memset(bh->b_data, 0, blocksize);
 696		branch[n].p = (__le32 *) bh->b_data + offsets[n];
 697		branch[n].key = cpu_to_le32(new_blocks[n]);
 698		*branch[n].p = branch[n].key;
 699		if ( n == indirect_blks) {
 700			current_block = new_blocks[n];
 701			/*
 702			 * End of chain, update the last new metablock of
 703			 * the chain to point to the newly allocated
 704			 * data block numbers
 705			 */
 706			for (i=1; i < num; i++)
 707				*(branch[n].p + i) = cpu_to_le32(++current_block);
 708		}
 709		BUFFER_TRACE(bh, "marking uptodate");
 710		set_buffer_uptodate(bh);
 711		unlock_buffer(bh);
 712
 713		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
 714		err = ext3_journal_dirty_metadata(handle, bh);
 715		if (err)
 716			goto failed;
 717	}
 718	*blks = num;
 719	return err;
 720failed:
 721	/* Allocation failed, free what we already allocated */
 722	for (i = 1; i <= n ; i++) {
 723		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 724		ext3_journal_forget(handle, branch[i].bh);
 725	}
 726	for (i = 0; i < indirect_blks; i++)
 727		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 728
 729	ext3_free_blocks(handle, inode, new_blocks[i], num);
 730
 731	return err;
 732}
 733
 734/**
 735 * ext3_splice_branch - splice the allocated branch onto inode.
 736 * @handle: handle for this transaction
 737 * @inode: owner
 738 * @block: (logical) number of block we are adding
 739 * @where: location of missing link
 740 * @num:   number of indirect blocks we are adding
 741 * @blks:  number of direct blocks we are adding
 742 *
 743 * This function fills the missing link and does all housekeeping needed in
 744 * inode (->i_blocks, etc.). In case of success we end up with the full
 745 * chain to new block and return 0.
 746 */
 747static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 748			long block, Indirect *where, int num, int blks)
 749{
 750	int i;
 751	int err = 0;
 752	struct ext3_block_alloc_info *block_i;
 753	ext3_fsblk_t current_block;
 754	struct ext3_inode_info *ei = EXT3_I(inode);
 755	struct timespec now;
 756
 757	block_i = ei->i_block_alloc_info;
 758	/*
 759	 * If we're splicing into a [td]indirect block (as opposed to the
 760	 * inode) then we need to get write access to the [td]indirect block
 761	 * before the splice.
 762	 */
 763	if (where->bh) {
 764		BUFFER_TRACE(where->bh, "get_write_access");
 765		err = ext3_journal_get_write_access(handle, where->bh);
 766		if (err)
 767			goto err_out;
 768	}
 769	/* That's it */
 770
 771	*where->p = where->key;
 772
 773	/*
 774	 * Update the host buffer_head or inode to point to the just-allocated
 775	 * direct blocks
 776	 */
 777	if (num == 0 && blks > 1) {
 778		current_block = le32_to_cpu(where->key) + 1;
 779		for (i = 1; i < blks; i++)
 780			*(where->p + i ) = cpu_to_le32(current_block++);
 781	}
 782
 783	/*
 784	 * update the most recently allocated logical & physical block
 785	 * in i_block_alloc_info, to assist in finding the proper goal block for the next
 786	 * allocation
 787	 */
 788	if (block_i) {
 789		block_i->last_alloc_logical_block = block + blks - 1;
 790		block_i->last_alloc_physical_block =
 791				le32_to_cpu(where[num].key) + blks - 1;
 792	}
 793
 794	/* We are done with atomic stuff, now do the rest of housekeeping */
 795	now = CURRENT_TIME_SEC;
 796	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
 797		inode->i_ctime = now;
 798		ext3_mark_inode_dirty(handle, inode);
 799	}
 800	/* ext3_mark_inode_dirty already updated i_sync_tid */
 801	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 802
 803	/* had we spliced it onto indirect block? */
 804	if (where->bh) {
 805		/*
 806		 * If we spliced it onto an indirect block, we haven't
 807		 * altered the inode.  Note however that if it is being spliced
 808		 * onto an indirect block at the very end of the file (the
 809		 * file is growing) then we *will* alter the inode to reflect
 810		 * the new i_size.  But that is not done here - it is done in
 811		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
 812		 */
 813		jbd_debug(5, "splicing indirect only\n");
 814		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
 815		err = ext3_journal_dirty_metadata(handle, where->bh);
 816		if (err)
 817			goto err_out;
 818	} else {
 819		/*
 820		 * OK, we spliced it into the inode itself on a direct block.
 821		 * Inode was dirtied above.
 822		 */
 823		jbd_debug(5, "splicing direct\n");
 824	}
 825	return err;
 826
 827err_out:
 828	for (i = 1; i <= num; i++) {
 829		BUFFER_TRACE(where[i].bh, "call journal_forget");
 830		ext3_journal_forget(handle, where[i].bh);
 831		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
 832	}
 833	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
 834
 835	return err;
 836}
 837
 838/*
 839 * Allocation strategy is simple: if we have to allocate something, we will
 840 * have to go the whole way to the leaf. So let's do it before attaching anything
 841 * to the tree, set linkage between the newborn blocks, write them if sync is
 842 * required, recheck the path, free and repeat if the check fails, otherwise
 843 * set the last missing link (that will protect us from any truncate-generated
 844 * removals - all blocks on the path are immune now) and possibly force the
 845 * write on the parent block.
 846 * That has a nice additional property: no special recovery from the failed
 847 * allocations is needed - we simply release blocks and do not touch anything
 848 * reachable from inode.
 849 *
 850 * `handle' can be NULL if create == 0.
 851 *
 852 * The BKL may not be held on entry here.  Be sure to take it early.
 853 * return > 0, # of blocks mapped or allocated.
 854 * return = 0, if plain lookup failed.
 855 * return < 0, error case.
 856 */
 857int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 858		sector_t iblock, unsigned long maxblocks,
 859		struct buffer_head *bh_result,
 860		int create)
 861{
 862	int err = -EIO;
 863	int offsets[4];
 864	Indirect chain[4];
 865	Indirect *partial;
 866	ext3_fsblk_t goal;
 867	int indirect_blks;
 868	int blocks_to_boundary = 0;
 869	int depth;
 870	struct ext3_inode_info *ei = EXT3_I(inode);
 871	int count = 0;
 872	ext3_fsblk_t first_block = 0;
 873
 874
 875	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
 876	J_ASSERT(handle != NULL || create == 0);
 877	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 878
 879	if (depth == 0)
 880		goto out;
 881
 882	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 883
 884	/* Simplest case - block found, no allocation needed */
 885	if (!partial) {
 886		first_block = le32_to_cpu(chain[depth - 1].key);
 887		clear_buffer_new(bh_result);
 888		count++;
 889		/*map more blocks*/
 890		while (count < maxblocks && count <= blocks_to_boundary) {
 891			ext3_fsblk_t blk;
 892
 893			if (!verify_chain(chain, chain + depth - 1)) {
 894				/*
 895				 * Indirect block might be removed by
 896				 * truncate while we were reading it.
 897				 * Handling of that case: forget what we've
 898				 * got now. Flag the err as EAGAIN, so it
 899				 * will reread.
 900				 */
 901				err = -EAGAIN;
 902				count = 0;
 903				break;
 904			}
 905			blk = le32_to_cpu(*(chain[depth-1].p + count));
 906
 907			if (blk == first_block + count)
 908				count++;
 909			else
 910				break;
 911		}
 912		if (err != -EAGAIN)
 913			goto got_it;
 914	}
 915
 916	/* Next simple case - plain lookup or failed read of indirect block */
 917	if (!create || err == -EIO)
 918		goto cleanup;
 919
 920	/*
 921	 * Block out ext3_truncate while we alter the tree
 922	 */
 923	mutex_lock(&ei->truncate_mutex);
 924
 925	/*
 926	 * If the indirect block is missing while we are reading
 927	 * the chain (ext3_get_branch() returns the -EAGAIN error), or
 928	 * if the chain has been changed after we grab the semaphore,
 929	 * (either because another process truncated this branch, or
 930	 * another get_block allocated this branch) re-grab the chain to see if
 931	 * the requested block has been allocated or not.
 932	 *
 933	 * Since we already block the truncate/other get_block
 934	 * at this point, we will have the current copy of the chain when we
 935	 * splice the branch into the tree.
 936	 */
 937	if (err == -EAGAIN || !verify_chain(chain, partial)) {
 938		while (partial > chain) {
 939			brelse(partial->bh);
 940			partial--;
 941		}
 942		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 943		if (!partial) {
 944			count++;
 945			mutex_unlock(&ei->truncate_mutex);
 946			if (err)
 947				goto cleanup;
 948			clear_buffer_new(bh_result);
 949			goto got_it;
 950		}
 951	}
 952
 953	/*
 954	 * Okay, we need to do block allocation.  Lazily initialize the block
 955	 * allocation info here if necessary
 956	*/
 957	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
 958		ext3_init_block_alloc_info(inode);
 959
 960	goal = ext3_find_goal(inode, iblock, partial);
 961
 962	/* the number of blocks needed to allocate for [d,t]indirect blocks */
 963	indirect_blks = (chain + depth) - partial - 1;
 964
 965	/*
 966	 * Next look up the indirect map to count the total number of
 967	 * direct blocks to allocate for this branch.
 968	 */
 969	count = ext3_blks_to_allocate(partial, indirect_blks,
 970					maxblocks, blocks_to_boundary);
 971	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 972				offsets + (partial - chain), partial);
 973
 974	/*
 975	 * The ext3_splice_branch call will free and forget any buffers
 976	 * on the new chain if there is a failure, but that risks using
 977	 * up transaction credits, especially for bitmaps where the
 978	 * credits cannot be returned.  Can we handle this somehow?  We
 979	 * may need to return -EAGAIN upwards in the worst case.  --sct
 980	 */
 981	if (!err)
 982		err = ext3_splice_branch(handle, inode, iblock,
 983					partial, indirect_blks, count);
 984	mutex_unlock(&ei->truncate_mutex);
 985	if (err)
 986		goto cleanup;
 987
 988	set_buffer_new(bh_result);
 989got_it:
 990	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 991	if (count > blocks_to_boundary)
 992		set_buffer_boundary(bh_result);
 993	err = count;
 994	/* Clean up and exit */
 995	partial = chain + depth - 1;	/* the whole chain */
 996cleanup:
 997	while (partial > chain) {
 998		BUFFER_TRACE(partial->bh, "call brelse");
 999		brelse(partial->bh);
1000		partial--;
1001	}
1002	BUFFER_TRACE(bh_result, "returned");
1003out:
1004	trace_ext3_get_blocks_exit(inode, iblock,
1005				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
1006				   count, err);
1007	return err;
1008}
1009
1010/* Maximum number of blocks we map for direct IO at once. */
1011#define DIO_MAX_BLOCKS 4096
1012/*
1013 * Number of credits we need for writing DIO_MAX_BLOCKS:
1014 * We need sb + group descriptor + bitmap + inode -> 4
1015 * For B blocks with A block pointers per block we need:
1016 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
1017 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
1018 */
1019#define DIO_CREDITS 25
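
Worked out with the assumed values from the comment above (B = 4096 blocks per request, A = 256 pointers per block for a 1KB block size), the formula does come to 25; a tiny user-space check, purely illustrative:

#include <stdio.h>

int main(void)
{
	int B = 4096;	/* DIO_MAX_BLOCKS */
	int A = 256;	/* block pointers per block at 1KB block size */
	/* sb + group descriptor + bitmap + inode = 4, then the
	 * triple/double/single indirect terms from the comment. */
	int credits = 4 + 1 + (B / A / A + 2) + (B / A + 2);

	printf("%d\n", credits);	/* prints 25 */
	return 0;
}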
1020
1021static int ext3_get_block(struct inode *inode, sector_t iblock,
1022			struct buffer_head *bh_result, int create)
1023{
1024	handle_t *handle = ext3_journal_current_handle();
1025	int ret = 0, started = 0;
1026	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1027
1028	if (create && !handle) {	/* Direct IO write... */
1029		if (max_blocks > DIO_MAX_BLOCKS)
1030			max_blocks = DIO_MAX_BLOCKS;
1031		handle = ext3_journal_start(inode, DIO_CREDITS +
1032				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
1033		if (IS_ERR(handle)) {
1034			ret = PTR_ERR(handle);
1035			goto out;
1036		}
1037		started = 1;
1038	}
1039
1040	ret = ext3_get_blocks_handle(handle, inode, iblock,
1041					max_blocks, bh_result, create);
1042	if (ret > 0) {
1043		bh_result->b_size = (ret << inode->i_blkbits);
1044		ret = 0;
1045	}
1046	if (started)
1047		ext3_journal_stop(handle);
1048out:
1049	return ret;
1050}
1051
1052int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1053		u64 start, u64 len)
1054{
1055	return generic_block_fiemap(inode, fieinfo, start, len,
1056				    ext3_get_block);
1057}
1058
1059/*
1060 * `handle' can be NULL if create is zero
1061 */
1062struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
1063				long block, int create, int *errp)
1064{
1065	struct buffer_head dummy;
1066	int fatal = 0, err;
1067
1068	J_ASSERT(handle != NULL || create == 0);
1069
1070	dummy.b_state = 0;
1071	dummy.b_blocknr = -1000;
1072	buffer_trace_init(&dummy.b_history);
1073	err = ext3_get_blocks_handle(handle, inode, block, 1,
1074					&dummy, create);
1075	/*
1076	 * ext3_get_blocks_handle() returns number of blocks
1077	 * mapped. 0 in case of a HOLE.
1078	 */
1079	if (err > 0) {
1080		WARN_ON(err > 1);
1081		err = 0;
1082	}
1083	*errp = err;
1084	if (!err && buffer_mapped(&dummy)) {
1085		struct buffer_head *bh;
1086		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1087		if (unlikely(!bh)) {
1088			*errp = -ENOMEM;
1089			goto err;
1090		}
1091		if (buffer_new(&dummy)) {
1092			J_ASSERT(create != 0);
1093			J_ASSERT(handle != NULL);
1094
1095			/*
1096			 * Now that we do not always journal data, we should
1097			 * keep in mind whether this should always journal the
1098			 * new buffer as metadata.  For now, regular file
1099			 * writes use ext3_get_block instead, so it's not a
1100			 * problem.
1101			 */
1102			lock_buffer(bh);
1103			BUFFER_TRACE(bh, "call get_create_access");
1104			fatal = ext3_journal_get_create_access(handle, bh);
1105			if (!fatal && !buffer_uptodate(bh)) {
1106				memset(bh->b_data,0,inode->i_sb->s_blocksize);
1107				set_buffer_uptodate(bh);
1108			}
1109			unlock_buffer(bh);
1110			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1111			err = ext3_journal_dirty_metadata(handle, bh);
1112			if (!fatal)
1113				fatal = err;
1114		} else {
1115			BUFFER_TRACE(bh, "not a new buffer");
1116		}
1117		if (fatal) {
1118			*errp = fatal;
1119			brelse(bh);
1120			bh = NULL;
1121		}
1122		return bh;
1123	}
1124err:
1125	return NULL;
1126}
1127
1128struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1129			       int block, int create, int *err)
1130{
1131	struct buffer_head * bh;
1132
1133	bh = ext3_getblk(handle, inode, block, create, err);
1134	if (!bh)
1135		return bh;
1136	if (bh_uptodate_or_lock(bh))
1137		return bh;
1138	get_bh(bh);
1139	bh->b_end_io = end_buffer_read_sync;
1140	submit_bh(READ | REQ_META | REQ_PRIO, bh);
1141	wait_on_buffer(bh);
1142	if (buffer_uptodate(bh))
1143		return bh;
1144	put_bh(bh);
1145	*err = -EIO;
1146	return NULL;
1147}
1148
1149static int walk_page_buffers(	handle_t *handle,
1150				struct buffer_head *head,
1151				unsigned from,
1152				unsigned to,
1153				int *partial,
1154				int (*fn)(	handle_t *handle,
1155						struct buffer_head *bh))
1156{
1157	struct buffer_head *bh;
1158	unsigned block_start, block_end;
1159	unsigned blocksize = head->b_size;
1160	int err, ret = 0;
1161	struct buffer_head *next;
1162
1163	for (	bh = head, block_start = 0;
1164		ret == 0 && (bh != head || !block_start);
1165		block_start = block_end, bh = next)
1166	{
1167		next = bh->b_this_page;
1168		block_end = block_start + blocksize;
1169		if (block_end <= from || block_start >= to) {
1170			if (partial && !buffer_uptodate(bh))
1171				*partial = 1;
1172			continue;
1173		}
1174		err = (*fn)(handle, bh);
1175		if (!ret)
1176			ret = err;
1177	}
1178	return ret;
1179}
1180
1181/*
1182 * To preserve ordering, it is essential that the hole instantiation and
1183 * the data write be encapsulated in a single transaction.  We cannot
1184 * close off a transaction and start a new one between the ext3_get_block()
1185 * and the commit_write().  So doing the journal_start at the start of
1186 * prepare_write() is the right place.
1187 *
1188 * Also, this function can nest inside ext3_writepage() ->
1189 * block_write_full_page(). In that case, we *know* that ext3_writepage()
1190 * has generated enough buffer credits to do the whole page.  So we won't
1191 * block on the journal in that case, which is good, because the caller may
1192 * be PF_MEMALLOC.
1193 *
1194 * By accident, ext3 can be reentered when a transaction is open via
1195 * quota file writes.  If we were to commit the transaction while thus
1196 * reentered, there can be a deadlock - we would be holding a quota
1197 * lock, and the commit would never complete if another thread had a
1198 * transaction open and was blocking on the quota lock - a ranking
1199 * violation.
1200 *
1201 * So what we do is to rely on the fact that journal_stop/journal_start
1202 * will _not_ run commit under these circumstances because handle->h_ref
1203 * is elevated.  We'll still have enough credits for the tiny quotafile
1204 * write.
1205 */
1206static int do_journal_get_write_access(handle_t *handle,
1207					struct buffer_head *bh)
1208{
1209	int dirty = buffer_dirty(bh);
1210	int ret;
1211
1212	if (!buffer_mapped(bh) || buffer_freed(bh))
1213		return 0;
1214	/*
1215	 * __block_prepare_write() could have dirtied some buffers. Clean
1216	 * the dirty bit as ext3_journal_get_write_access() could complain
1217	 * otherwise about fs integrity issues. Setting of the dirty bit
1218	 * by __block_prepare_write() isn't a real problem here as we clear
1219	 * the bit before releasing a page lock and thus writeback cannot
1220	 * ever write the buffer.
1221	 */
1222	if (dirty)
1223		clear_buffer_dirty(bh);
1224	ret = ext3_journal_get_write_access(handle, bh);
1225	if (!ret && dirty)
1226		ret = ext3_journal_dirty_metadata(handle, bh);
1227	return ret;
1228}
1229
1230/*
1231 * Truncate blocks that were not used by write. We have to truncate the
1232 * pagecache as well so that corresponding buffers get properly unmapped.
1233 */
1234static void ext3_truncate_failed_write(struct inode *inode)
1235{
1236	truncate_inode_pages(inode->i_mapping, inode->i_size);
1237	ext3_truncate(inode);
1238}
1239
1240/*
1241 * Truncate blocks that were not used by direct IO write. We have to zero out
1242 * the last file block as well because direct IO might have written to it.
1243 */
1244static void ext3_truncate_failed_direct_write(struct inode *inode)
1245{
1246	ext3_block_truncate_page(inode, inode->i_size);
1247	ext3_truncate(inode);
1248}
1249
1250static int ext3_write_begin(struct file *file, struct address_space *mapping,
1251				loff_t pos, unsigned len, unsigned flags,
1252				struct page **pagep, void **fsdata)
1253{
1254	struct inode *inode = mapping->host;
1255	int ret;
1256	handle_t *handle;
1257	int retries = 0;
1258	struct page *page;
1259	pgoff_t index;
1260	unsigned from, to;
1261	/* Reserve one block more for addition to orphan list in case
1262	 * we allocate blocks but write fails for some reason */
1263	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
1264
1265	trace_ext3_write_begin(inode, pos, len, flags);
1266
1267	index = pos >> PAGE_CACHE_SHIFT;
1268	from = pos & (PAGE_CACHE_SIZE - 1);
1269	to = from + len;
1270
1271retry:
1272	page = grab_cache_page_write_begin(mapping, index, flags);
1273	if (!page)
1274		return -ENOMEM;
1275	*pagep = page;
1276
1277	handle = ext3_journal_start(inode, needed_blocks);
1278	if (IS_ERR(handle)) {
1279		unlock_page(page);
1280		page_cache_release(page);
1281		ret = PTR_ERR(handle);
1282		goto out;
1283	}
1284	ret = __block_write_begin(page, pos, len, ext3_get_block);
1285	if (ret)
1286		goto write_begin_failed;
1287
1288	if (ext3_should_journal_data(inode)) {
1289		ret = walk_page_buffers(handle, page_buffers(page),
1290				from, to, NULL, do_journal_get_write_access);
1291	}
1292write_begin_failed:
1293	if (ret) {
1294		/*
1295		 * block_write_begin may have instantiated a few blocks
1296		 * outside i_size.  Trim these off again. Don't need
1297		 * i_size_read because we hold i_mutex.
1298		 *
1299		 * Add inode to orphan list in case we crash before truncate
1300		 * finishes. Do this only if ext3_can_truncate() agrees so
1301		 * that orphan processing code is happy.
1302		 */
1303		if (pos + len > inode->i_size && ext3_can_truncate(inode))
1304			ext3_orphan_add(handle, inode);
1305		ext3_journal_stop(handle);
1306		unlock_page(page);
1307		page_cache_release(page);
1308		if (pos + len > inode->i_size)
1309			ext3_truncate_failed_write(inode);
1310	}
1311	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1312		goto retry;
1313out:
1314	return ret;
1315}
1316
1317
1318int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1319{
1320	int err = journal_dirty_data(handle, bh);
1321	if (err)
1322		ext3_journal_abort_handle(__func__, __func__,
1323						bh, handle, err);
1324	return err;
1325}
1326
1327/* For ordered writepage and write_end functions */
1328static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1329{
1330	/*
1331	 * Write could have mapped the buffer but it didn't copy the data in
1332	 * yet. So avoid filing such buffer into a transaction.
1333	 */
1334	if (buffer_mapped(bh) && buffer_uptodate(bh))
1335		return ext3_journal_dirty_data(handle, bh);
1336	return 0;
1337}
1338
1339/* For write_end() in data=journal mode */
1340static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1341{
1342	if (!buffer_mapped(bh) || buffer_freed(bh))
1343		return 0;
1344	set_buffer_uptodate(bh);
1345	return ext3_journal_dirty_metadata(handle, bh);
1346}
1347
1348/*
1349 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
1350 * for the whole page but later we failed to copy the data in. Update inode
1351 * size according to what we managed to copy. The rest is going to be
1352 * truncated in write_end function.
1353 */
1354static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1355{
1356	/* What matters to us is i_disksize. We don't write i_size anywhere */
1357	if (pos + copied > inode->i_size)
1358		i_size_write(inode, pos + copied);
1359	if (pos + copied > EXT3_I(inode)->i_disksize) {
1360		EXT3_I(inode)->i_disksize = pos + copied;
1361		mark_inode_dirty(inode);
1362	}
1363}
1364
1365/*
1366 * We need to pick up the new inode size which generic_commit_write gave us
1367 * `file' can be NULL - eg, when called from page_symlink().
1368 *
1369 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
1370 * buffers are managed internally.
1371 */
1372static int ext3_ordered_write_end(struct file *file,
1373				struct address_space *mapping,
1374				loff_t pos, unsigned len, unsigned copied,
1375				struct page *page, void *fsdata)
1376{
1377	handle_t *handle = ext3_journal_current_handle();
1378	struct inode *inode = file->f_mapping->host;
1379	unsigned from, to;
1380	int ret = 0, ret2;
1381
1382	trace_ext3_ordered_write_end(inode, pos, len, copied);
1383	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1384
1385	from = pos & (PAGE_CACHE_SIZE - 1);
1386	to = from + copied;
1387	ret = walk_page_buffers(handle, page_buffers(page),
1388		from, to, NULL, journal_dirty_data_fn);
1389
1390	if (ret == 0)
1391		update_file_sizes(inode, pos, copied);
1392	/*
1393	 * There may be allocated blocks outside of i_size because
1394	 * we failed to copy some data. Prepare for truncate.
1395	 */
1396	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1397		ext3_orphan_add(handle, inode);
1398	ret2 = ext3_journal_stop(handle);
1399	if (!ret)
1400		ret = ret2;
1401	unlock_page(page);
1402	page_cache_release(page);
1403
1404	if (pos + len > inode->i_size)
1405		ext3_truncate_failed_write(inode);
1406	return ret ? ret : copied;
1407}
1408
1409static int ext3_writeback_write_end(struct file *file,
1410				struct address_space *mapping,
1411				loff_t pos, unsigned len, unsigned copied,
1412				struct page *page, void *fsdata)
1413{
1414	handle_t *handle = ext3_journal_current_handle();
1415	struct inode *inode = file->f_mapping->host;
1416	int ret;
1417
1418	trace_ext3_writeback_write_end(inode, pos, len, copied);
1419	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1420	update_file_sizes(inode, pos, copied);
1421	/*
1422	 * There may be allocated blocks outside of i_size because
1423	 * we failed to copy some data. Prepare for truncate.
1424	 */
1425	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1426		ext3_orphan_add(handle, inode);
1427	ret = ext3_journal_stop(handle);
1428	unlock_page(page);
1429	page_cache_release(page);
1430
1431	if (pos + len > inode->i_size)
1432		ext3_truncate_failed_write(inode);
1433	return ret ? ret : copied;
1434}
1435
1436static int ext3_journalled_write_end(struct file *file,
1437				struct address_space *mapping,
1438				loff_t pos, unsigned len, unsigned copied,
1439				struct page *page, void *fsdata)
1440{
1441	handle_t *handle = ext3_journal_current_handle();
1442	struct inode *inode = mapping->host;
1443	struct ext3_inode_info *ei = EXT3_I(inode);
1444	int ret = 0, ret2;
1445	int partial = 0;
1446	unsigned from, to;
1447
1448	trace_ext3_journalled_write_end(inode, pos, len, copied);
1449	from = pos & (PAGE_CACHE_SIZE - 1);
1450	to = from + len;
1451
1452	if (copied < len) {
1453		if (!PageUptodate(page))
1454			copied = 0;
1455		page_zero_new_buffers(page, from + copied, to);
1456		to = from + copied;
1457	}
1458
1459	ret = walk_page_buffers(handle, page_buffers(page), from,
1460				to, &partial, write_end_fn);
1461	if (!partial)
1462		SetPageUptodate(page);
1463
1464	if (pos + copied > inode->i_size)
1465		i_size_write(inode, pos + copied);
1466	/*
1467	 * There may be allocated blocks outside of i_size because
1468	 * we failed to copy some data. Prepare for truncate.
1469	 */
1470	if (pos + len > inode->i_size && ext3_can_truncate(inode))
1471		ext3_orphan_add(handle, inode);
1472	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1473	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
1474	if (inode->i_size > ei->i_disksize) {
1475		ei->i_disksize = inode->i_size;
1476		ret2 = ext3_mark_inode_dirty(handle, inode);
1477		if (!ret)
1478			ret = ret2;
1479	}
1480
1481	ret2 = ext3_journal_stop(handle);
1482	if (!ret)
1483		ret = ret2;
1484	unlock_page(page);
1485	page_cache_release(page);
1486
1487	if (pos + len > inode->i_size)
1488		ext3_truncate_failed_write(inode);
1489	return ret ? ret : copied;
1490}
1491
1492/*
1493 * bmap() is special.  It gets used by applications such as lilo and by
1494 * the swapper to find the on-disk block of a specific piece of data.
1495 *
1496 * Naturally, this is dangerous if the block concerned is still in the
1497 * journal.  If somebody makes a swapfile on an ext3 data-journaling
1498 * filesystem and enables swap, then they may get a nasty shock when the
1499 * data getting swapped to that swapfile suddenly gets overwritten by
1500 * the original zeros written out previously to the journal and
1501 * awaiting writeback in the kernel's buffer cache.
1502 *
1503 * So, if we see any bmap calls here on a modified, data-journaled file,
1504 * take extra steps to flush any blocks which might be in the cache.
1505 */
1506static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1507{
1508	struct inode *inode = mapping->host;
1509	journal_t *journal;
1510	int err;
1511
1512	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
1513		/*
1514		 * This is a REALLY heavyweight approach, but the use of
1515		 * bmap on dirty files is expected to be extremely rare:
1516		 * only if we run lilo or swapon on a freshly made file
1517		 * do we expect this to happen.
1518		 *
1519		 * (bmap requires CAP_SYS_RAWIO so this does not
1520		 * represent an unprivileged user DOS attack --- we'd be
1521		 * in trouble if mortal users could trigger this path at
1522		 * will.)
1523		 *
1524		 * NB. EXT3_STATE_JDATA is not set on files other than
1525		 * regular files.  If somebody wants to bmap a directory
1526		 * or symlink and gets confused because the buffer
1527		 * hasn't yet been flushed to disk, they deserve
1528		 * everything they get.
1529		 */
1530
1531		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
1532		journal = EXT3_JOURNAL(inode);
1533		journal_lock_updates(journal);
1534		err = journal_flush(journal);
1535		journal_unlock_updates(journal);
1536
1537		if (err)
1538			return 0;
1539	}
1540
1541	return generic_block_bmap(mapping,block,ext3_get_block);
1542}
1543
1544static int bget_one(handle_t *handle, struct buffer_head *bh)
1545{
1546	get_bh(bh);
1547	return 0;
1548}
1549
1550static int bput_one(handle_t *handle, struct buffer_head *bh)
1551{
1552	put_bh(bh);
1553	return 0;
1554}
1555
1556static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1557{
1558	return !buffer_mapped(bh);
1559}
1560
1561/*
1562 * Note that whenever we need to map blocks we start a transaction even if
1563 * we're not journalling data.  This is to preserve ordering: any hole
1564 * instantiation within __block_write_full_page -> ext3_get_block() should be
1565 * journalled along with the data so we don't crash and then get metadata which
1566 * refers to old data.
1567 *
1568 * In all journalling modes block_write_full_page() will start the I/O.
1569 *
1570 * We don't honour synchronous mounts for writepage().  That would be
1571 * disastrous.  Any write() or metadata operation will sync the fs for
1572 * us.
1573 */
1574static int ext3_ordered_writepage(struct page *page,
1575				struct writeback_control *wbc)
1576{
1577	struct inode *inode = page->mapping->host;
1578	struct buffer_head *page_bufs;
1579	handle_t *handle = NULL;
1580	int ret = 0;
1581	int err;
1582
1583	J_ASSERT(PageLocked(page));
1584	/*
1585	 * We don't want to warn for emergency remount. The condition is
1586	 * ordered to avoid dereferencing inode->i_sb in non-error case to
1587	 * avoid slow-downs.
1588	 */
1589	WARN_ON_ONCE(IS_RDONLY(inode) &&
1590		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1591
1592	/*
1593	 * We give up here if we're reentered, because it might be for a
1594	 * different filesystem.
1595	 */
1596	if (ext3_journal_current_handle())
1597		goto out_fail;
1598
1599	trace_ext3_ordered_writepage(page);
1600	if (!page_has_buffers(page)) {
1601		create_empty_buffers(page, inode->i_sb->s_blocksize,
1602				(1 << BH_Dirty)|(1 << BH_Uptodate));
1603		page_bufs = page_buffers(page);
1604	} else {
1605		page_bufs = page_buffers(page);
1606		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
1607				       NULL, buffer_unmapped)) {
1608			/* Provide NULL get_block() to catch bugs if buffers
1609			 * weren't really mapped */
1610			return block_write_full_page(page, NULL, wbc);
1611		}
1612	}
1613	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1614
1615	if (IS_ERR(handle)) {
1616		ret = PTR_ERR(handle);
1617		goto out_fail;
1618	}
1619
1620	walk_page_buffers(handle, page_bufs, 0,
1621			PAGE_CACHE_SIZE, NULL, bget_one);
1622
1623	ret = block_write_full_page(page, ext3_get_block, wbc);
1624
1625	/*
1626	 * The page can become unlocked at any point now, and
1627	 * truncate can then come in and change things.  So we
1628	 * can't touch *page from now on.  But *page_bufs is
1629	 * safe due to elevated refcount.
1630	 */
1631
1632	/*
1633	 * And attach them to the current transaction.  But only if
1634	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
1635	 * and generally junk.
1636	 */
1637	if (ret == 0)
1638		ret = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1639					NULL, journal_dirty_data_fn);
1640	walk_page_buffers(handle, page_bufs, 0,
1641			PAGE_CACHE_SIZE, NULL, bput_one);
1642	err = ext3_journal_stop(handle);
1643	if (!ret)
1644		ret = err;
1645	return ret;
1646
1647out_fail:
1648	redirty_page_for_writepage(wbc, page);
1649	unlock_page(page);
1650	return ret;
1651}
1652
1653static int ext3_writeback_writepage(struct page *page,
1654				struct writeback_control *wbc)
1655{
1656	struct inode *inode = page->mapping->host;
1657	handle_t *handle = NULL;
1658	int ret = 0;
1659	int err;
1660
1661	J_ASSERT(PageLocked(page));
1662	/*
1663	 * We don't want to warn for emergency remount. The condition is
1664	 * ordered to avoid dereferencing inode->i_sb in non-error case to
1665	 * avoid slow-downs.
1666	 */
1667	WARN_ON_ONCE(IS_RDONLY(inode) &&
1668		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1669
1670	if (ext3_journal_current_handle())
1671		goto out_fail;
1672
1673	trace_ext3_writeback_writepage(page);
1674	if (page_has_buffers(page)) {
1675		if (!walk_page_buffers(NULL, page_buffers(page), 0,
1676				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
1677			/* Provide NULL get_block() to catch bugs if buffers
1678			 * weren't really mapped */
1679			return block_write_full_page(page, NULL, wbc);
1680		}
1681	}
1682
1683	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1684	if (IS_ERR(handle)) {
1685		ret = PTR_ERR(handle);
1686		goto out_fail;
1687	}
1688
1689	ret = block_write_full_page(page, ext3_get_block, wbc);
1690
1691	err = ext3_journal_stop(handle);
1692	if (!ret)
1693		ret = err;
1694	return ret;
1695
1696out_fail:
1697	redirty_page_for_writepage(wbc, page);
1698	unlock_page(page);
1699	return ret;
1700}
1701
1702static int ext3_journalled_writepage(struct page *page,
1703				struct writeback_control *wbc)
1704{
1705	struct inode *inode = page->mapping->host;
1706	handle_t *handle = NULL;
1707	int ret = 0;
1708	int err;
1709
1710	J_ASSERT(PageLocked(page));
1711	/*
1712	 * We don't want to warn for emergency remount. The condition is
1713	 * ordered to avoid dereferencing inode->i_sb in non-error case to
1714	 * avoid slow-downs.
1715	 */
1716	WARN_ON_ONCE(IS_RDONLY(inode) &&
1717		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1718
1719	if (ext3_journal_current_handle())
1720		goto no_write;
1721
1722	trace_ext3_journalled_writepage(page);
1723	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1724	if (IS_ERR(handle)) {
1725		ret = PTR_ERR(handle);
1726		goto no_write;
1727	}
1728
1729	if (!page_has_buffers(page) || PageChecked(page)) {
1730		/*
1731		 * It's mmapped pagecache.  Add buffers and journal it.  There
1732		 * doesn't seem much point in redirtying the page here.
1733		 */
1734		ClearPageChecked(page);
1735		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
1736					  ext3_get_block);
1737		if (ret != 0) {
1738			ext3_journal_stop(handle);
1739			goto out_unlock;
1740		}
1741		ret = walk_page_buffers(handle, page_buffers(page), 0,
1742			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1743
1744		err = walk_page_buffers(handle, page_buffers(page), 0,
1745				PAGE_CACHE_SIZE, NULL, write_end_fn);
1746		if (ret == 0)
1747			ret = err;
1748		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1749		atomic_set(&EXT3_I(inode)->i_datasync_tid,
1750			   handle->h_transaction->t_tid);
1751		unlock_page(page);
1752	} else {
1753		/*
1754		 * It may be a page full of checkpoint-mode buffers.  We don't
1755		 * really know unless we go poke around in the buffer_heads.
1756		 * But block_write_full_page will do the right thing.
1757		 */
1758		ret = block_write_full_page(page, ext3_get_block, wbc);
1759	}
1760	err = ext3_journal_stop(handle);
1761	if (!ret)
1762		ret = err;
1763out:
1764	return ret;
1765
1766no_write:
1767	redirty_page_for_writepage(wbc, page);
1768out_unlock:
1769	unlock_page(page);
1770	goto out;
1771}
1772
1773static int ext3_readpage(struct file *file, struct page *page)
1774{
1775	trace_ext3_readpage(page);
1776	return mpage_readpage(page, ext3_get_block);
1777}
1778
1779static int
1780ext3_readpages(struct file *file, struct address_space *mapping,
1781		struct list_head *pages, unsigned nr_pages)
1782{
1783	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1784}
1785
1786static void ext3_invalidatepage(struct page *page, unsigned int offset,
1787				unsigned int length)
1788{
1789	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1790
1791	trace_ext3_invalidatepage(page, offset, length);
1792
1793	/*
1794	 * If it's a full truncate we just forget about the pending dirtying
1795	 */
1796	if (offset == 0 && length == PAGE_CACHE_SIZE)
1797		ClearPageChecked(page);
1798
1799	journal_invalidatepage(journal, page, offset, length);
1800}
1801
1802static int ext3_releasepage(struct page *page, gfp_t wait)
1803{
1804	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1805
1806	trace_ext3_releasepage(page);
1807	WARN_ON(PageChecked(page));
1808	if (!page_has_buffers(page))
1809		return 0;
1810	return journal_try_to_free_buffers(journal, page, wait);
1811}
1812
1813/*
1814 * If the O_DIRECT write will extend the file then add this inode to the
1815 * orphan list.  So recovery will truncate it back to the original size
1816 * if the machine crashes during the write.
1817 *
1818 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1819 * crashes then stale disk data _may_ be exposed inside the file. But current
1820 * VFS code falls back into buffered path in that case so we are safe.
1821 */
1822static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1823			const struct iovec *iov, loff_t offset,
1824			unsigned long nr_segs)
1825{
1826	struct file *file = iocb->ki_filp;
1827	struct inode *inode = file->f_mapping->host;
1828	struct ext3_inode_info *ei = EXT3_I(inode);
1829	handle_t *handle;
1830	ssize_t ret;
1831	int orphan = 0;
1832	size_t count = iov_length(iov, nr_segs);
1833	int retries = 0;
1834
1835	trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
1836
1837	if (rw == WRITE) {
1838		loff_t final_size = offset + count;
1839
1840		if (final_size > inode->i_size) {
1841			/* Credits for sb + inode write */
1842			handle = ext3_journal_start(inode, 2);
1843			if (IS_ERR(handle)) {
1844				ret = PTR_ERR(handle);
1845				goto out;
1846			}
1847			ret = ext3_orphan_add(handle, inode);
1848			if (ret) {
1849				ext3_journal_stop(handle);
1850				goto out;
1851			}
1852			orphan = 1;
1853			ei->i_disksize = inode->i_size;
1854			ext3_journal_stop(handle);
1855		}
1856	}
1857
1858retry:
1859	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
1860				 ext3_get_block);
1861	/*
1862	 * In case of error extending write may have instantiated a few
1863	 * blocks outside i_size. Trim these off again.
1864	 */
1865	if (unlikely((rw & WRITE) && ret < 0)) {
1866		loff_t isize = i_size_read(inode);
1867		loff_t end = offset + iov_length(iov, nr_segs);
1868
1869		if (end > isize)
1870			ext3_truncate_failed_direct_write(inode);
1871	}
1872	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1873		goto retry;
1874
1875	if (orphan) {
1876		int err;
1877
1878		/* Credits for sb + inode write */
1879		handle = ext3_journal_start(inode, 2);
1880		if (IS_ERR(handle)) {
1881			/* This is really bad luck. We've written the data
1882			 * but cannot extend i_size. Truncate allocated blocks
1883			 * and pretend the write failed... */
1884			ext3_truncate_failed_direct_write(inode);
1885			ret = PTR_ERR(handle);
1886			if (inode->i_nlink)
1887				ext3_orphan_del(NULL, inode);
1888			goto out;
1889		}
1890		if (inode->i_nlink)
1891			ext3_orphan_del(handle, inode);
1892		if (ret > 0) {
1893			loff_t end = offset + ret;
1894			if (end > inode->i_size) {
1895				ei->i_disksize = end;
1896				i_size_write(inode, end);
1897				/*
1898				 * We're going to return a positive `ret'
1899				 * here due to non-zero-length I/O, so there's
1900				 * no way of reporting error returns from
1901				 * ext3_mark_inode_dirty() to userspace.  So
1902				 * ignore it.
1903				 */
1904				ext3_mark_inode_dirty(handle, inode);
1905			}
1906		}
1907		err = ext3_journal_stop(handle);
1908		if (ret == 0)
1909			ret = err;
1910	}
1911out:
1912	trace_ext3_direct_IO_exit(inode, offset,
1913				iov_length(iov, nr_segs), rw, ret);
1914	return ret;
1915}
1916
1917/*
1918 * Pages can be marked dirty completely asynchronously from ext3's journalling
1919 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1920 * much here because ->set_page_dirty is called under VFS locks.  The page is
1921 * not necessarily locked.
1922 *
1923 * We cannot just dirty the page and leave attached buffers clean, because the
1924 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1925 * or jbddirty because all the journalling code will explode.
1926 *
1927 * So what we do is to mark the page "pending dirty" and next time writepage
1928 * is called, propagate that into the buffers appropriately.
1929 */
1930static int ext3_journalled_set_page_dirty(struct page *page)
1931{
1932	SetPageChecked(page);
1933	return __set_page_dirty_nobuffers(page);
1934}
1935
1936static const struct address_space_operations ext3_ordered_aops = {
1937	.readpage		= ext3_readpage,
1938	.readpages		= ext3_readpages,
1939	.writepage		= ext3_ordered_writepage,
1940	.write_begin		= ext3_write_begin,
1941	.write_end		= ext3_ordered_write_end,
1942	.bmap			= ext3_bmap,
1943	.invalidatepage		= ext3_invalidatepage,
1944	.releasepage		= ext3_releasepage,
1945	.direct_IO		= ext3_direct_IO,
1946	.migratepage		= buffer_migrate_page,
1947	.is_partially_uptodate  = block_is_partially_uptodate,
1948	.is_dirty_writeback	= buffer_check_dirty_writeback,
1949	.error_remove_page	= generic_error_remove_page,
1950};
1951
1952static const struct address_space_operations ext3_writeback_aops = {
1953	.readpage		= ext3_readpage,
1954	.readpages		= ext3_readpages,
1955	.writepage		= ext3_writeback_writepage,
1956	.write_begin		= ext3_write_begin,
1957	.write_end		= ext3_writeback_write_end,
1958	.bmap			= ext3_bmap,
1959	.invalidatepage		= ext3_invalidatepage,
1960	.releasepage		= ext3_releasepage,
1961	.direct_IO		= ext3_direct_IO,
1962	.migratepage		= buffer_migrate_page,
1963	.is_partially_uptodate  = block_is_partially_uptodate,
1964	.error_remove_page	= generic_error_remove_page,
1965};
1966
1967static const struct address_space_operations ext3_journalled_aops = {
1968	.readpage		= ext3_readpage,
1969	.readpages		= ext3_readpages,
1970	.writepage		= ext3_journalled_writepage,
1971	.write_begin		= ext3_write_begin,
1972	.write_end		= ext3_journalled_write_end,
1973	.set_page_dirty		= ext3_journalled_set_page_dirty,
1974	.bmap			= ext3_bmap,
1975	.invalidatepage		= ext3_invalidatepage,
1976	.releasepage		= ext3_releasepage,
1977	.is_partially_uptodate  = block_is_partially_uptodate,
1978	.error_remove_page	= generic_error_remove_page,
1979};
1980
1981void ext3_set_aops(struct inode *inode)
1982{
1983	if (ext3_should_order_data(inode))
1984		inode->i_mapping->a_ops = &ext3_ordered_aops;
1985	else if (ext3_should_writeback_data(inode))
1986		inode->i_mapping->a_ops = &ext3_writeback_aops;
1987	else
1988		inode->i_mapping->a_ops = &ext3_journalled_aops;
1989}
1990
1991/*
1992 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1993 * up to the end of the block which corresponds to `from'.
1994 * This is required during truncate. We need to physically zero the tail end
1995 * of that block so it doesn't yield old data if the file is later grown.
1996 */
1997static int ext3_block_truncate_page(struct inode *inode, loff_t from)
1998{
1999	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2000	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2001	unsigned blocksize, iblock, length, pos;
2002	struct page *page;
2003	handle_t *handle = NULL;
2004	struct buffer_head *bh;
2005	int err = 0;
2006
2007	/* Truncated on block boundary - nothing to do */
2008	blocksize = inode->i_sb->s_blocksize;
2009	if ((from & (blocksize - 1)) == 0)
2010		return 0;
2011
2012	page = grab_cache_page(inode->i_mapping, index);
2013	if (!page)
2014		return -ENOMEM;
2015	length = blocksize - (offset & (blocksize - 1));
2016	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2017
2018	if (!page_has_buffers(page))
2019		create_empty_buffers(page, blocksize, 0);
2020
2021	/* Find the buffer that contains "offset" */
2022	bh = page_buffers(page);
2023	pos = blocksize;
2024	while (offset >= pos) {
2025		bh = bh->b_this_page;
2026		iblock++;
2027		pos += blocksize;
2028	}
2029
2030	err = 0;
2031	if (buffer_freed(bh)) {
2032		BUFFER_TRACE(bh, "freed: skip");
2033		goto unlock;
2034	}
2035
2036	if (!buffer_mapped(bh)) {
2037		BUFFER_TRACE(bh, "unmapped");
2038		ext3_get_block(inode, iblock, bh, 0);
2039		/* unmapped? It's a hole - nothing to do */
2040		if (!buffer_mapped(bh)) {
2041			BUFFER_TRACE(bh, "still unmapped");
2042			goto unlock;
2043		}
2044	}
2045
2046	/* Ok, it's mapped. Make sure it's up-to-date */
2047	if (PageUptodate(page))
2048		set_buffer_uptodate(bh);
2049
2050	if (!bh_uptodate_or_lock(bh)) {
2051		err = bh_submit_read(bh);
2052		/* Uhhuh. Read error. Complain and punt. */
2053		if (err)
2054			goto unlock;
2055	}
2056
2057	/* data=writeback mode doesn't need transaction to zero-out data */
2058	if (!ext3_should_writeback_data(inode)) {
2059		/* We journal at most one block */
2060		handle = ext3_journal_start(inode, 1);
2061		if (IS_ERR(handle)) {
2062			clear_highpage(page);
2063			flush_dcache_page(page);
2064			err = PTR_ERR(handle);
2065			goto unlock;
2066		}
2067	}
2068
2069	if (ext3_should_journal_data(inode)) {
2070		BUFFER_TRACE(bh, "get write access");
2071		err = ext3_journal_get_write_access(handle, bh);
2072		if (err)
2073			goto stop;
2074	}
2075
2076	zero_user(page, offset, length);
2077	BUFFER_TRACE(bh, "zeroed end of block");
2078
2079	err = 0;
2080	if (ext3_should_journal_data(inode)) {
2081		err = ext3_journal_dirty_metadata(handle, bh);
2082	} else {
2083		if (ext3_should_order_data(inode))
2084			err = ext3_journal_dirty_data(handle, bh);
2085		mark_buffer_dirty(bh);
2086	}
2087stop:
2088	if (handle)
2089		ext3_journal_stop(handle);
2090
2091unlock:
2092	unlock_page(page);
2093	page_cache_release(page);
2094	return err;
2095}
2096
2097/*
2098 * Probably it should be a library function... search for first non-zero word
2099 * or memcmp with zero_page, whatever is better for particular architecture.
2100 * Linus?
2101 */
2102static inline int all_zeroes(__le32 *p, __le32 *q)
2103{
2104	while (p < q)
2105		if (*p++)
2106			return 0;
2107	return 1;
2108}
2109
2110/**
2111 *	ext3_find_shared - find the indirect blocks for partial truncation.
2112 *	@inode:	  inode in question
2113 *	@depth:	  depth of the affected branch
2114 *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
2115 *	@chain:	  place to store the pointers to partial indirect blocks
2116 *	@top:	  place to store the (detached) top of branch
2117 *
2118 *	This is a helper function used by ext3_truncate().
2119 *
2120 *	When we do truncate() we may have to clean the ends of several
2121 *	indirect blocks but leave the blocks themselves alive. Block is
2122 *	partially truncated if some data below the new i_size is referred
2123 *	from it (and it is on the path to the first completely truncated
2124 *	data block, indeed).  We have to free the top of that path along
2125 *	with everything to the right of the path. Since no allocation
2126 *	past the truncation point is possible until ext3_truncate()
2127 *	finishes, we may safely do the latter, but top of branch may
2128 *	require special attention - pageout below the truncation point
2129 *	might try to populate it.
2130 *
2131 *	We atomically detach the top of branch from the tree, store the
2132 *	block number of its root in *@top, pointers to buffer_heads of
2133 *	partially truncated blocks - in @chain[].bh and pointers to
2134 *	their last elements that should not be removed - in
2135 *	@chain[].p. Return value is the pointer to last filled element
2136 *	of @chain.
2137 *
2138 *	The work left to caller to do the actual freeing of subtrees:
2139 *		a) free the subtree starting from *@top
2140 *		b) free the subtrees whose roots are stored in
2141 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
2142 *		c) free the subtrees growing from the inode past the @chain[0].
2143 *			(no partially truncated stuff there).  */
2144
2145static Indirect *ext3_find_shared(struct inode *inode, int depth,
2146			int offsets[4], Indirect chain[4], __le32 *top)
2147{
2148	Indirect *partial, *p;
2149	int k, err;
2150
2151	*top = 0;
2152	/* Make k index the deepest non-null offset + 1 */
2153	for (k = depth; k > 1 && !offsets[k-1]; k--)
2154		;
2155	partial = ext3_get_branch(inode, k, offsets, chain, &err);
2156	/* Writer: pointers */
2157	if (!partial)
2158		partial = chain + k-1;
2159	/*
2160	 * If the branch acquired continuation since we've looked at it -
2161	 * fine, it should all survive and (new) top doesn't belong to us.
2162	 */
2163	if (!partial->key && *partial->p)
2164		/* Writer: end */
2165		goto no_top;
2166	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2167		;
2168	/*
2169	 * OK, we've found the last block that must survive. The rest of our
2170	 * branch should be detached before unlocking. However, if that rest
2171	 * of branch is all ours and does not grow immediately from the inode
2172	 * it's easier to cheat and just decrement partial->p.
2173	 */
2174	if (p == chain + k - 1 && p > chain) {
2175		p->p--;
2176	} else {
2177		*top = *p->p;
2178		/* Nope, don't do this in ext3.  Must leave the tree intact */
2179#if 0
2180		*p->p = 0;
2181#endif
2182	}
2183	/* Writer: end */
2184
2185	while(partial > p) {
2186		brelse(partial->bh);
2187		partial--;
2188	}
2189no_top:
2190	return partial;
2191}
2192
2193/*
2194 * Zero a number of block pointers in either an inode or an indirect block.
2195 * If we restart the transaction we must again get write access to the
2196 * indirect block for further modification.
2197 *
2198 * We release `count' blocks on disk, but (last - first) may be greater
2199 * than `count' because there can be holes in there.
2200 */
2201static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2202		struct buffer_head *bh, ext3_fsblk_t block_to_free,
2203		unsigned long count, __le32 *first, __le32 *last)
2204{
2205	__le32 *p;
2206	if (try_to_extend_transaction(handle, inode)) {
2207		if (bh) {
2208			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2209			if (ext3_journal_dirty_metadata(handle, bh))
2210				return;
2211		}
2212		ext3_mark_inode_dirty(handle, inode);
2213		truncate_restart_transaction(handle, inode);
2214		if (bh) {
2215			BUFFER_TRACE(bh, "retaking write access");
2216			if (ext3_journal_get_write_access(handle, bh))
2217				return;
2218		}
2219	}
2220
2221	/*
2222	 * Any buffers which are on the journal will be in memory. We find
2223	 * them on the hash table so journal_revoke() will run journal_forget()
2224	 * on them.  We've already detached each block from the file, so
2225	 * bforget() in journal_forget() should be safe.
2226	 *
2227	 * AKPM: turn on bforget in journal_forget()!!!
2228	 */
2229	for (p = first; p < last; p++) {
2230		u32 nr = le32_to_cpu(*p);
2231		if (nr) {
2232			struct buffer_head *bh;
2233
2234			*p = 0;
2235			bh = sb_find_get_block(inode->i_sb, nr);
2236			ext3_forget(handle, 0, inode, bh, nr);
2237		}
2238	}
2239
2240	ext3_free_blocks(handle, inode, block_to_free, count);
2241}
2242
2243/**
2244 * ext3_free_data - free a list of data blocks
2245 * @handle:	handle for this transaction
2246 * @inode:	inode we are dealing with
2247 * @this_bh:	indirect buffer_head which contains *@first and *@last
2248 * @first:	array of block numbers
2249 * @last:	points immediately past the end of array
2250 *
2251 * We are freeing all blocks referred from that array (numbers are stored as
2252 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2253 *
2254 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2255 * blocks are contiguous then releasing them at one time will only affect one
2256 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2257 * actually use a lot of journal space.
2258 *
2259 * @this_bh will be %NULL if @first and @last point into the inode's direct
2260 * block pointers.
2261 */
2262static void ext3_free_data(handle_t *handle, struct inode *inode,
2263			   struct buffer_head *this_bh,
2264			   __le32 *first, __le32 *last)
2265{
2266	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2267	unsigned long count = 0;	    /* Number of blocks in the run */
2268	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
2269					       corresponding to
2270					       block_to_free */
2271	ext3_fsblk_t nr;		    /* Current block # */
2272	__le32 *p;			    /* Pointer into inode/ind
2273					       for current block */
2274	int err;
2275
2276	if (this_bh) {				/* For indirect block */
2277		BUFFER_TRACE(this_bh, "get_write_access");
2278		err = ext3_journal_get_write_access(handle, this_bh);
2279		/* Important: if we can't update the indirect pointers
2280		 * to the blocks, we can't free them. */
2281		if (err)
2282			return;
2283	}
2284
2285	for (p = first; p < last; p++) {
2286		nr = le32_to_cpu(*p);
2287		if (nr) {
2288			/* accumulate blocks to free if they're contiguous */
2289			if (count == 0) {
2290				block_to_free = nr;
2291				block_to_free_p = p;
2292				count = 1;
2293			} else if (nr == block_to_free + count) {
2294				count++;
2295			} else {
2296				ext3_clear_blocks(handle, inode, this_bh,
2297						  block_to_free,
2298						  count, block_to_free_p, p);
2299				block_to_free = nr;
2300				block_to_free_p = p;
2301				count = 1;
2302			}
2303		}
2304	}
2305
2306	if (count > 0)
2307		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2308				  count, block_to_free_p, p);
2309
2310	if (this_bh) {
2311		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2312
2313		/*
2314		 * The buffer head should have an attached journal head at this
2315		 * point. However, if the data is corrupted and an indirect
2316		 * block pointed to itself, it would have been detached when
2317		 * the block was cleared. Check for this instead of OOPSing.
2318		 */
2319		if (bh2jh(this_bh))
2320			ext3_journal_dirty_metadata(handle, this_bh);
2321		else
2322			ext3_error(inode->i_sb, "ext3_free_data",
2323				   "circular indirect block detected, "
2324				   "inode=%lu, block=%llu",
2325				   inode->i_ino,
2326				   (unsigned long long)this_bh->b_blocknr);
2327	}
2328}
2329
2330/**
2331 *	ext3_free_branches - free an array of branches
2332 *	@handle: JBD handle for this transaction
2333 *	@inode:	inode we are dealing with
2334 *	@parent_bh: the buffer_head which contains *@first and *@last
2335 *	@first:	array of block numbers
2336 *	@last:	pointer immediately past the end of array
2337 *	@depth:	depth of the branches to free
2338 *
2339 *	We are freeing all blocks referred from these branches (numbers are
2340 *	stored as little-endian 32-bit) and updating @inode->i_blocks
2341 *	appropriately.
2342 */
2343static void ext3_free_branches(handle_t *handle, struct inode *inode,
2344			       struct buffer_head *parent_bh,
2345			       __le32 *first, __le32 *last, int depth)
2346{
2347	ext3_fsblk_t nr;
2348	__le32 *p;
2349
2350	if (is_handle_aborted(handle))
2351		return;
2352
2353	if (depth--) {
2354		struct buffer_head *bh;
2355		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2356		p = last;
2357		while (--p >= first) {
2358			nr = le32_to_cpu(*p);
2359			if (!nr)
2360				continue;		/* A hole */
2361
2362			/* Go read the buffer for the next level down */
2363			bh = sb_bread(inode->i_sb, nr);
2364
2365			/*
2366			 * A read failure? Report error and clear slot
2367			 * (should be rare).
2368			 */
2369			if (!bh) {
2370				ext3_error(inode->i_sb, "ext3_free_branches",
2371					   "Read failure, inode=%lu, block="E3FSBLK,
2372					   inode->i_ino, nr);
2373				continue;
2374			}
2375
2376			/* This zaps the entire block.  Bottom up. */
2377			BUFFER_TRACE(bh, "free child branches");
2378			ext3_free_branches(handle, inode, bh,
2379					   (__le32*)bh->b_data,
2380					   (__le32*)bh->b_data + addr_per_block,
2381					   depth);
2382
2383			/*
2384			 * Everything below this pointer has been
2385			 * released.  Now let this top-of-subtree go.
2386			 *
2387			 * We want the freeing of this indirect block to be
2388			 * atomic in the journal with the updating of the
2389			 * bitmap block which owns it.  So make some room in
2390			 * the journal.
2391			 *
2392			 * We zero the parent pointer *after* freeing its
2393			 * pointee in the bitmaps, so if extend_transaction()
2394			 * for some reason fails to put the bitmap changes and
2395			 * the release into the same transaction, recovery
2396			 * will merely complain about releasing a free block,
2397			 * rather than leaking blocks.
2398			 */
2399			if (is_handle_aborted(handle))
2400				return;
2401			if (try_to_extend_transaction(handle, inode)) {
2402				ext3_mark_inode_dirty(handle, inode);
2403				truncate_restart_transaction(handle, inode);
2404			}
2405
2406			/*
2407			 * We've probably journalled the indirect block several
2408			 * times during the truncate.  But it's no longer
2409			 * needed and we now drop it from the transaction via
2410			 * journal_revoke().
2411			 *
2412			 * That's easy if it's exclusively part of this
2413			 * transaction.  But if it's part of the committing
2414			 * transaction then journal_forget() will simply
2415			 * brelse() it.  That means that if the underlying
2416			 * block is reallocated in ext3_get_block(),
2417			 * unmap_underlying_metadata() will find this block
2418			 * and will try to get rid of it.  damn, damn. Thus
2419			 * we don't allow a block to be reallocated until
2420			 * a transaction freeing it has fully committed.
2421			 *
2422			 * We also have to make sure journal replay after a
2423			 * crash does not overwrite non-journaled data blocks
2424			 * with old metadata when the block got reallocated for
2425			 * data.  Thus we have to store a revoke record for a
2426			 * block in the same transaction in which we free the
2427			 * block.
2428			 */
2429			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2430
2431			ext3_free_blocks(handle, inode, nr, 1);
2432
2433			if (parent_bh) {
2434				/*
2435				 * The block which we have just freed is
2436				 * pointed to by an indirect block: journal it
2437				 */
2438				BUFFER_TRACE(parent_bh, "get_write_access");
2439				if (!ext3_journal_get_write_access(handle,
2440								   parent_bh)){
2441					*p = 0;
2442					BUFFER_TRACE(parent_bh,
2443					"call ext3_journal_dirty_metadata");
2444					ext3_journal_dirty_metadata(handle,
2445								    parent_bh);
2446				}
2447			}
2448		}
2449	} else {
2450		/* We have reached the bottom of the tree. */
2451		BUFFER_TRACE(parent_bh, "free data blocks");
2452		ext3_free_data(handle, inode, parent_bh, first, last);
2453	}
2454}
2455
2456int ext3_can_truncate(struct inode *inode)
2457{
2458	if (S_ISREG(inode->i_mode))
2459		return 1;
2460	if (S_ISDIR(inode->i_mode))
2461		return 1;
2462	if (S_ISLNK(inode->i_mode))
2463		return !ext3_inode_is_fast_symlink(inode);
2464	return 0;
2465}
2466
2467/*
2468 * ext3_truncate()
2469 *
2470 * We block out ext3_get_block() block instantiations across the entire
2471 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2472 * simultaneously on behalf of the same inode.
2473 *
2474 * As we work through the truncate and commit bits of it to the journal there
2475 * is one core, guiding principle: the file's tree must always be consistent on
2476 * disk.  We must be able to restart the truncate after a crash.
2477 *
2478 * The file's tree may be transiently inconsistent in memory (although it
2479 * probably isn't), but whenever we close off and commit a journal transaction,
2480 * the contents of (the filesystem + the journal) must be consistent and
2481 * restartable.  It's pretty simple, really: bottom up, right to left (although
2482 * left-to-right works OK too).
2483 *
2484 * Note that at recovery time, journal replay occurs *before* the restart of
2485 * truncate against the orphan inode list.
2486 *
2487 * The committed inode has the new, desired i_size (which is the same as
2488 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2489 * that this inode's truncate did not complete and it will again call
2490 * ext3_truncate() to have another go.  So there will be instantiated blocks
2491 * to the right of the truncation point in a crashed ext3 filesystem.  But
2492 * that's fine - as long as they are linked from the inode, the post-crash
2493 * ext3_truncate() run will find them and release them.
2494 */
2495void ext3_truncate(struct inode *inode)
2496{
2497	handle_t *handle;
2498	struct ext3_inode_info *ei = EXT3_I(inode);
2499	__le32 *i_data = ei->i_data;
2500	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2501	int offsets[4];
2502	Indirect chain[4];
2503	Indirect *partial;
2504	__le32 nr = 0;
2505	int n;
2506	long last_block;
2507	unsigned blocksize = inode->i_sb->s_blocksize;
2508
2509	trace_ext3_truncate_enter(inode);
2510
2511	if (!ext3_can_truncate(inode))
2512		goto out_notrans;
2513
2514	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
2515		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
2516
2517	handle = start_transaction(inode);
2518	if (IS_ERR(handle))
2519		goto out_notrans;
2520
2521	last_block = (inode->i_size + blocksize-1)
2522					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2523	n = ext3_block_to_path(inode, last_block, offsets, NULL);
2524	if (n == 0)
2525		goto out_stop;	/* error */
2526
2527	/*
2528	 * OK.  This truncate is going to happen.  We add the inode to the
2529	 * orphan list, so that if this truncate spans multiple transactions,
2530	 * and we crash, we will resume the truncate when the filesystem
2531	 * recovers.  It also marks the inode dirty, to catch the new size.
2532	 *
2533	 * Implication: the file must always be in a sane, consistent
2534	 * truncatable state while each transaction commits.
2535	 */
2536	if (ext3_orphan_add(handle, inode))
2537		goto out_stop;
2538
2539	/*
2540	 * The orphan list entry will now protect us from any crash which
2541	 * occurs before the truncate completes, so it is now safe to propagate
2542	 * the new, shorter inode size (held for now in i_size) into the
2543	 * on-disk inode. We do this via i_disksize, which is the value which
2544	 * ext3 *really* writes onto the disk inode.
2545	 */
2546	ei->i_disksize = inode->i_size;
2547
2548	/*
2549	 * From here we block out all ext3_get_block() callers who want to
2550	 * modify the block allocation tree.
2551	 */
2552	mutex_lock(&ei->truncate_mutex);
2553
2554	if (n == 1) {		/* direct blocks */
2555		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2556			       i_data + EXT3_NDIR_BLOCKS);
2557		goto do_indirects;
2558	}
2559
2560	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2561	/* Kill the top of shared branch (not detached) */
2562	if (nr) {
2563		if (partial == chain) {
2564			/* Shared branch grows from the inode */
2565			ext3_free_branches(handle, inode, NULL,
2566					   &nr, &nr+1, (chain+n-1) - partial);
2567			*partial->p = 0;
2568			/*
2569			 * We mark the inode dirty prior to restart,
2570			 * and prior to stop.  No need for it here.
2571			 */
2572		} else {
2573			/* Shared branch grows from an indirect block */
2574			ext3_free_branches(handle, inode, partial->bh,
2575					partial->p,
2576					partial->p+1, (chain+n-1) - partial);
2577		}
2578	}
2579	/* Clear the ends of indirect blocks on the shared branch */
2580	while (partial > chain) {
2581		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2582				   (__le32*)partial->bh->b_data+addr_per_block,
2583				   (chain+n-1) - partial);
2584		BUFFER_TRACE(partial->bh, "call brelse");
2585		brelse (partial->bh);
2586		partial--;
2587	}
2588do_indirects:
2589	/* Kill the remaining (whole) subtrees */
2590	switch (offsets[0]) {
2591	default:
2592		nr = i_data[EXT3_IND_BLOCK];
2593		if (nr) {
2594			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2595			i_data[EXT3_IND_BLOCK] = 0;
2596		}
2597	case EXT3_IND_BLOCK:
2598		nr = i_data[EXT3_DIND_BLOCK];
2599		if (nr) {
2600			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2601			i_data[EXT3_DIND_BLOCK] = 0;
2602		}
2603	case EXT3_DIND_BLOCK:
2604		nr = i_data[EXT3_TIND_BLOCK];
2605		if (nr) {
2606			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2607			i_data[EXT3_TIND_BLOCK] = 0;
2608		}
2609	case EXT3_TIND_BLOCK:
2610		;
2611	}
2612
2613	ext3_discard_reservation(inode);
2614
2615	mutex_unlock(&ei->truncate_mutex);
2616	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2617	ext3_mark_inode_dirty(handle, inode);
2618
2619	/*
2620	 * In a multi-transaction truncate, we only make the final transaction
2621	 * synchronous
2622	 */
2623	if (IS_SYNC(inode))
2624		handle->h_sync = 1;
2625out_stop:
2626	/*
2627	 * If this was a simple ftruncate(), and the file will remain alive
2628	 * then we need to clear up the orphan record which we created above.
2629	 * However, if this was a real unlink then we were called by
2630	 * ext3_evict_inode(), and we allow that function to clean up the
2631	 * orphan info for us.
2632	 */
2633	if (inode->i_nlink)
2634		ext3_orphan_del(handle, inode);
2635
2636	ext3_journal_stop(handle);
2637	trace_ext3_truncate_exit(inode);
2638	return;
2639out_notrans:
2640	/*
2641	 * Delete the inode from orphan list so that it doesn't stay there
2642	 * forever and trigger assertion on umount.
2643	 */
2644	if (inode->i_nlink)
2645		ext3_orphan_del(NULL, inode);
2646	trace_ext3_truncate_exit(inode);
2647}
2648
2649static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2650		unsigned long ino, struct ext3_iloc *iloc)
2651{
2652	unsigned long block_group;
2653	unsigned long offset;
2654	ext3_fsblk_t block;
2655	struct ext3_group_desc *gdp;
2656
2657	if (!ext3_valid_inum(sb, ino)) {
2658		/*
2659		 * This error is already checked for in namei.c unless we are
2660		 * looking at an NFS filehandle, in which case no error
2661		 * report is needed
2662		 */
2663		return 0;
2664	}
2665
2666	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2667	gdp = ext3_get_group_desc(sb, block_group, NULL);
2668	if (!gdp)
2669		return 0;
2670	/*
2671	 * Figure out the offset within the block group inode table
2672	 */
2673	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2674		EXT3_INODE_SIZE(sb);
2675	block = le32_to_cpu(gdp->bg_inode_table) +
2676		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
2677
2678	iloc->block_group = block_group;
2679	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2680	return block;
2681}
2682
2683/*
2684 * ext3_get_inode_loc returns with an extra refcount against the inode's
2685 * underlying buffer_head on success. If 'in_mem' is true, we have all
2686 * data in memory that is needed to recreate the on-disk version of this
2687 * inode.
2688 */
2689static int __ext3_get_inode_loc(struct inode *inode,
2690				struct ext3_iloc *iloc, int in_mem)
2691{
2692	ext3_fsblk_t block;
2693	struct buffer_head *bh;
2694
2695	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2696	if (!block)
2697		return -EIO;
2698
2699	bh = sb_getblk(inode->i_sb, block);
2700	if (unlikely(!bh)) {
2701		ext3_error (inode->i_sb, "ext3_get_inode_loc",
2702				"unable to read inode block - "
2703				"inode=%lu, block="E3FSBLK,
2704				 inode->i_ino, block);
2705		return -ENOMEM;
2706	}
2707	if (!buffer_uptodate(bh)) {
2708		lock_buffer(bh);
2709
2710		/*
2711		 * If the buffer has the write error flag, we have failed
2712		 * to write out another inode in the same block.  In this
2713		 * case, we don't have to read the block because we may
2714		 * read the old inode data successfully.
2715		 */
2716		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2717			set_buffer_uptodate(bh);
2718
2719		if (buffer_uptodate(bh)) {
2720			/* someone brought it uptodate while we waited */
2721			unlock_buffer(bh);
2722			goto has_buffer;
2723		}
2724
2725		/*
2726		 * If we have all information of the inode in memory and this
2727		 * is the only valid inode in the block, we need not read the
2728		 * block.
2729		 */
2730		if (in_mem) {
2731			struct buffer_head *bitmap_bh;
2732			struct ext3_group_desc *desc;
2733			int inodes_per_buffer;
2734			int inode_offset, i;
2735			int block_group;
2736			int start;
2737
2738			block_group = (inode->i_ino - 1) /
2739					EXT3_INODES_PER_GROUP(inode->i_sb);
2740			inodes_per_buffer = bh->b_size /
2741				EXT3_INODE_SIZE(inode->i_sb);
2742			inode_offset = ((inode->i_ino - 1) %
2743					EXT3_INODES_PER_GROUP(inode->i_sb));
2744			start = inode_offset & ~(inodes_per_buffer - 1);
2745
2746			/* Is the inode bitmap in cache? */
2747			desc = ext3_get_group_desc(inode->i_sb,
2748						block_group, NULL);
2749			if (!desc)
2750				goto make_io;
2751
2752			bitmap_bh = sb_getblk(inode->i_sb,
2753					le32_to_cpu(desc->bg_inode_bitmap));
2754			if (unlikely(!bitmap_bh))
2755				goto make_io;
2756
2757			/*
2758			 * If the inode bitmap isn't in cache then the
2759			 * optimisation may end up performing two reads instead
2760			 * of one, so skip it.
2761			 */
2762			if (!buffer_uptodate(bitmap_bh)) {
2763				brelse(bitmap_bh);
2764				goto make_io;
2765			}
2766			for (i = start; i < start + inodes_per_buffer; i++) {
2767				if (i == inode_offset)
2768					continue;
2769				if (ext3_test_bit(i, bitmap_bh->b_data))
2770					break;
2771			}
2772			brelse(bitmap_bh);
2773			if (i == start + inodes_per_buffer) {
2774				/* all other inodes are free, so skip I/O */
2775				memset(bh->b_data, 0, bh->b_size);
2776				set_buffer_uptodate(bh);
2777				unlock_buffer(bh);
2778				goto has_buffer;
2779			}
2780		}
2781
2782make_io:
2783		/*
2784		 * There are other valid inodes in the buffer, this inode
2785		 * has in-inode xattrs, or we don't have this inode in memory.
2786		 * Read the block from disk.
2787		 */
2788		trace_ext3_load_inode(inode);
2789		get_bh(bh);
2790		bh->b_end_io = end_buffer_read_sync;
2791		submit_bh(READ | REQ_META | REQ_PRIO, bh);
2792		wait_on_buffer(bh);
2793		if (!buffer_uptodate(bh)) {
2794			ext3_error(inode->i_sb, "ext3_get_inode_loc",
2795					"unable to read inode block - "
2796					"inode=%lu, block="E3FSBLK,
2797					inode->i_ino, block);
2798			brelse(bh);
2799			return -EIO;
2800		}
2801	}
2802has_buffer:
2803	iloc->bh = bh;
2804	return 0;
2805}
2806
2807int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2808{
2809	/* We have all inode data except xattrs in memory here. */
2810	return __ext3_get_inode_loc(inode, iloc,
2811		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
2812}
2813
2814void ext3_set_inode_flags(struct inode *inode)
2815{
2816	unsigned int flags = EXT3_I(inode)->i_flags;
2817
2818	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2819	if (flags & EXT3_SYNC_FL)
2820		inode->i_flags |= S_SYNC;
2821	if (flags & EXT3_APPEND_FL)
2822		inode->i_flags |= S_APPEND;
2823	if (flags & EXT3_IMMUTABLE_FL)
2824		inode->i_flags |= S_IMMUTABLE;
2825	if (flags & EXT3_NOATIME_FL)
2826		inode->i_flags |= S_NOATIME;
2827	if (flags & EXT3_DIRSYNC_FL)
2828		inode->i_flags |= S_DIRSYNC;
2829}
2830
2831/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2832void ext3_get_inode_flags(struct ext3_inode_info *ei)
2833{
2834	unsigned int flags = ei->vfs_inode.i_flags;
2835
2836	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2837			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2838	if (flags & S_SYNC)
2839		ei->i_flags |= EXT3_SYNC_FL;
2840	if (flags & S_APPEND)
2841		ei->i_flags |= EXT3_APPEND_FL;
2842	if (flags & S_IMMUTABLE)
2843		ei->i_flags |= EXT3_IMMUTABLE_FL;
2844	if (flags & S_NOATIME)
2845		ei->i_flags |= EXT3_NOATIME_FL;
2846	if (flags & S_DIRSYNC)
2847		ei->i_flags |= EXT3_DIRSYNC_FL;
2848}
2849
2850struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2851{
2852	struct ext3_iloc iloc;
2853	struct ext3_inode *raw_inode;
2854	struct ext3_inode_info *ei;
2855	struct buffer_head *bh;
2856	struct inode *inode;
2857	journal_t *journal = EXT3_SB(sb)->s_journal;
2858	transaction_t *transaction;
2859	long ret;
2860	int block;
2861	uid_t i_uid;
2862	gid_t i_gid;
2863
2864	inode = iget_locked(sb, ino);
2865	if (!inode)
2866		return ERR_PTR(-ENOMEM);
2867	if (!(inode->i_state & I_NEW))
2868		return inode;
2869
2870	ei = EXT3_I(inode);
2871	ei->i_block_alloc_info = NULL;
2872
2873	ret = __ext3_get_inode_loc(inode, &iloc, 0);
2874	if (ret < 0)
2875		goto bad_inode;
2876	bh = iloc.bh;
2877	raw_inode = ext3_raw_inode(&iloc);
2878	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2879	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2880	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2881	if(!(test_opt (inode->i_sb, NO_UID32))) {
2882		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2883		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2884	}
2885	i_uid_write(inode, i_uid);
2886	i_gid_write(inode, i_gid);
2887	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
2888	inode->i_size = le32_to_cpu(raw_inode->i_size);
2889	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2890	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2891	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2892	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2893
2894	ei->i_state_flags = 0;
2895	ei->i_dir_start_lookup = 0;
2896	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2897	/* We now have enough fields to check if the inode was active or not.
2898	 * This is needed because nfsd might try to access dead inodes
2899	 * the test is the same one that e2fsck uses
2900	 * NeilBrown 1999oct15
2901	 */
2902	if (inode->i_nlink == 0) {
2903		if (inode->i_mode == 0 ||
2904		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2905			/* this inode is deleted */
2906			brelse (bh);
2907			ret = -ESTALE;
2908			goto bad_inode;
2909		}
2910		/* The only unlinked inodes we let through here have
2911		 * valid i_mode and are being read by the orphan
2912		 * recovery code: that's fine, we're about to complete
2913		 * the process of deleting those. */
2914	}
2915	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2916	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2917#ifdef EXT3_FRAGMENTS
2918	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2919	ei->i_frag_no = raw_inode->i_frag;
2920	ei->i_frag_size = raw_inode->i_fsize;
2921#endif
2922	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2923	if (!S_ISREG(inode->i_mode)) {
2924		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2925	} else {
2926		inode->i_size |=
2927			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2928	}
2929	ei->i_disksize = inode->i_size;
2930	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2931	ei->i_block_group = iloc.block_group;
2932	/*
2933	 * NOTE! The in-memory inode i_data array is in little-endian order
2934	 * even on big-endian machines: we do NOT byteswap the block numbers!
2935	 */
2936	for (block = 0; block < EXT3_N_BLOCKS; block++)
2937		ei->i_data[block] = raw_inode->i_block[block];
2938	INIT_LIST_HEAD(&ei->i_orphan);
2939
2940	/*
2941	 * Set transaction ids of transactions that have to be committed
2942	 * to finish f[data]sync. We set them to currently running transaction
2943	 * as we cannot be sure that the inode or some of its metadata isn't
2944	 * part of the transaction - the inode could have been reclaimed and
2945	 * now it is reread from disk.
2946	 */
2947	if (journal) {
2948		tid_t tid;
2949
2950		spin_lock(&journal->j_state_lock);
2951		if (journal->j_running_transaction)
2952			transaction = journal->j_running_transaction;
2953		else
2954			transaction = journal->j_committing_transaction;
2955		if (transaction)
2956			tid = transaction->t_tid;
2957		else
2958			tid = journal->j_commit_sequence;
2959		spin_unlock(&journal->j_state_lock);
2960		atomic_set(&ei->i_sync_tid, tid);
2961		atomic_set(&ei->i_datasync_tid, tid);
2962	}
2963
2964	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2965	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2966		/*
2967		 * When mke2fs creates big inodes it does not zero out
2968		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2969		 * so ignore those first few inodes.
2970		 */
2971		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2972		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2973		    EXT3_INODE_SIZE(inode->i_sb)) {
2974			brelse (bh);
2975			ret = -EIO;
2976			goto bad_inode;
2977		}
2978		if (ei->i_extra_isize == 0) {
2979			/* The extra space is currently unused. Use it. */
2980			ei->i_extra_isize = sizeof(struct ext3_inode) -
2981					    EXT3_GOOD_OLD_INODE_SIZE;
2982		} else {
2983			__le32 *magic = (void *)raw_inode +
2984					EXT3_GOOD_OLD_INODE_SIZE +
2985					ei->i_extra_isize;
2986			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
2987				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
2988		}
2989	} else
2990		ei->i_extra_isize = 0;
2991
2992	if (S_ISREG(inode->i_mode)) {
2993		inode->i_op = &ext3_file_inode_operations;
2994		inode->i_fop = &ext3_file_operations;
2995		ext3_set_aops(inode);
2996	} else if (S_ISDIR(inode->i_mode)) {
2997		inode->i_op = &ext3_dir_inode_operations;
2998		inode->i_fop = &ext3_dir_operations;
2999	} else if (S_ISLNK(inode->i_mode)) {
3000		if (ext3_inode_is_fast_symlink(inode)) {
3001			inode->i_op = &ext3_fast_symlink_inode_operations;
3002			nd_terminate_link(ei->i_data, inode->i_size,
3003				sizeof(ei->i_data) - 1);
3004		} else {
3005			inode->i_op = &ext3_symlink_inode_operations;
3006			ext3_set_aops(inode);
3007		}
3008	} else {
3009		inode->i_op = &ext3_special_inode_operations;
3010		if (raw_inode->i_block[0])
3011			init_special_inode(inode, inode->i_mode,
3012			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3013		else
3014			init_special_inode(inode, inode->i_mode,
3015			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3016	}
3017	brelse (iloc.bh);
3018	ext3_set_inode_flags(inode);
3019	unlock_new_inode(inode);
3020	return inode;
3021
3022bad_inode:
3023	iget_failed(inode);
3024	return ERR_PTR(ret);
3025}
3026
3027/*
3028 * Post the struct inode info into an on-disk inode location in the
3029 * buffer-cache.  This gobbles the caller's reference to the
3030 * buffer_head in the inode location struct.
3031 *
3032 * The caller must have write access to iloc->bh.
3033 */
3034static int ext3_do_update_inode(handle_t *handle,
3035				struct inode *inode,
3036				struct ext3_iloc *iloc)
3037{
3038	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
3039	struct ext3_inode_info *ei = EXT3_I(inode);
3040	struct buffer_head *bh = iloc->bh;
3041	int err = 0, rc, block;
3042	int need_datasync = 0;
3043	__le32 disksize;
3044	uid_t i_uid;
3045	gid_t i_gid;
3046
3047again:
3048	/* we can't allow multiple procs in here at once, it's a bit racy */
3049	lock_buffer(bh);
3050
3051	/* For fields not tracked in the in-memory inode,
3052	 * initialise them to zero for new inodes. */
3053	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
3054		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
3055
3056	ext3_get_inode_flags(ei);
3057	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3058	i_uid = i_uid_read(inode);
3059	i_gid = i_gid_read(inode);
3060	if(!(test_opt(inode->i_sb, NO_UID32))) {
3061		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
3062		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
3063/*
3064 * Fix up interoperability with old kernels. Otherwise, old inodes get
3065 * re-used with the upper 16 bits of the uid/gid intact
3066 */
3067		if(!ei->i_dtime) {
3068			raw_inode->i_uid_high =
3069				cpu_to_le16(high_16_bits(i_uid));
3070			raw_inode->i_gid_high =
3071				cpu_to_le16(high_16_bits(i_gid));
3072		} else {
3073			raw_inode->i_uid_high = 0;
3074			raw_inode->i_gid_high = 0;
3075		}
3076	} else {
3077		raw_inode->i_uid_low =
3078			cpu_to_le16(fs_high2lowuid(i_uid));
3079		raw_inode->i_gid_low =
3080			cpu_to_le16(fs_high2lowgid(i_gid));
3081		raw_inode->i_uid_high = 0;
3082		raw_inode->i_gid_high = 0;
3083	}
3084	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
3085	disksize = cpu_to_le32(ei->i_disksize);
3086	if (disksize != raw_inode->i_size) {
3087		need_datasync = 1;
3088		raw_inode->i_size = disksize;
3089	}
3090	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
3091	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
3092	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
3093	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
3094	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
3095	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
3096#ifdef EXT3_FRAGMENTS
3097	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
3098	raw_inode->i_frag = ei->i_frag_no;
3099	raw_inode->i_fsize = ei->i_frag_size;
3100#endif
3101	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
3102	if (!S_ISREG(inode->i_mode)) {
3103		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
3104	} else {
3105		disksize = cpu_to_le32(ei->i_disksize >> 32);
3106		if (disksize != raw_inode->i_size_high) {
3107			raw_inode->i_size_high = disksize;
3108			need_datasync = 1;
3109		}
3110		if (ei->i_disksize > 0x7fffffffULL) {
3111			struct super_block *sb = inode->i_sb;
3112			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
3113					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
3114			    EXT3_SB(sb)->s_es->s_rev_level ==
3115					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
3116			       /* If this is the first large file
3117				* created, add a flag to the superblock.
3118				*/
3119				unlock_buffer(bh);
3120				err = ext3_journal_get_write_access(handle,
3121						EXT3_SB(sb)->s_sbh);
3122				if (err)
3123					goto out_brelse;
3124
3125				ext3_update_dynamic_rev(sb);
3126				EXT3_SET_RO_COMPAT_FEATURE(sb,
3127					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
3128				handle->h_sync = 1;
3129				err = ext3_journal_dirty_metadata(handle,
3130						EXT3_SB(sb)->s_sbh);
3131				/* get our lock and start over */
3132				goto again;
3133			}
3134		}
3135	}
3136	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3137	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3138		if (old_valid_dev(inode->i_rdev)) {
3139			raw_inode->i_block[0] =
3140				cpu_to_le32(old_encode_dev(inode->i_rdev));
3141			raw_inode->i_block[1] = 0;
3142		} else {
3143			raw_inode->i_block[0] = 0;
3144			raw_inode->i_block[1] =
3145				cpu_to_le32(new_encode_dev(inode->i_rdev));
3146			raw_inode->i_block[2] = 0;
3147		}
3148	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
3149		raw_inode->i_block[block] = ei->i_data[block];
3150
3151	if (ei->i_extra_isize)
3152		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3153
3154	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
3155	unlock_buffer(bh);
3156	rc = ext3_journal_dirty_metadata(handle, bh);
3157	if (!err)
3158		err = rc;
3159	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
3160
3161	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
3162	if (need_datasync)
3163		atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
3164out_brelse:
3165	brelse(bh);
3166	ext3_std_error(inode->i_sb, err);
3167	return err;
3168}
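Editorial aside: the uid/gid handling in ext3_do_update_inode() above splits a 32-bit id across two 16-bit on-disk fields when NO_UID32 is not set. A minimal stand-alone sketch of that split; the helpers here are local stand-ins for the low_16_bits()/high_16_bits() macros used by the function.

#include <stdint.h>
#include <stdio.h>

static uint16_t low_16(uint32_t id)  { return id & 0xffff; }
static uint16_t high_16(uint32_t id) { return (id >> 16) & 0xffff; }

int main(void)
{
	uint32_t uid = 100000;	/* a uid that does not fit in 16 bits */

	printf("i_uid_low  = %u\n", low_16(uid));	/* 34464 */
	printf("i_uid_high = %u\n", high_16(uid));	/*     1 */
	printf("recombined = %u\n",
	       ((uint32_t)high_16(uid) << 16) | low_16(uid));	/* 100000 */
	return 0;
}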
3169
3170/*
3171 * ext3_write_inode()
3172 *
3173 * We are called from a few places:
3174 *
3175 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
3176 *   Here, there will be no transaction running. We wait for any running
3177 *   transaction to commit.
3178 *
3179 * - Within flush work (for sys_sync(), kupdate and such).
3180 *   We wait on commit, if told to.
3181 *
3182 * - Within iput_final() -> write_inode_now()
3183 *   We wait on commit, if told to.
3184 *
3185 * In all cases it is actually safe for us to return without doing anything,
3186 * because the inode has been copied into a raw inode buffer in
3187 * ext3_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
3188 * writeback.
3189 *
3190 * Note that we are absolutely dependent upon all inode dirtiers doing the
3191 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3192 * which we are interested.
3193 *
3194 * It would be a bug for them to not do this.  The code:
3195 *
3196 *	mark_inode_dirty(inode)
3197 *	stuff();
3198 *	inode->i_size = expr;
3199 *
3200 * is in error because write_inode() could occur while `stuff()' is running,
3201 * and the new i_size will be lost.  Plus the inode will no longer be on the
3202 * superblock's dirty inode list.
3203 */
3204int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
3205{
3206	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
3207		return 0;
3208
3209	if (ext3_journal_current_handle()) {
3210		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3211		dump_stack();
3212		return -EIO;
3213	}
3214
3215	/*
3216	 * No need to force transaction in WB_SYNC_NONE mode. Also
3217	 * ext3_sync_fs() will force the commit after everything is
3218	 * written.
3219	 */
3220	if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
3221		return 0;
3222
3223	return ext3_force_commit(inode->i_sb);
3224}
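Editorial aside: the "code in error" fragment in the comment above ext3_write_inode() is fixed simply by dirtying the inode after every field of interest has been updated, e.g.:

	stuff();
	inode->i_size = expr;
	mark_inode_dirty(inode);	/* dirty the inode last, so nothing is lost */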
3225
3226/*
3227 * ext3_setattr()
3228 *
3229 * Called from notify_change.
3230 *
3231 * We want to trap VFS attempts to truncate the file as soon as
3232 * possible.  In particular, we want to make sure that when the VFS
3233 * shrinks i_size, we put the inode on the orphan list and modify
3234 * i_disksize immediately, so that during the subsequent flushing of
3235 * dirty pages and freeing of disk blocks, we can guarantee that any
3236 * commit will leave the blocks being flushed in an unused state on
3237 * disk.  (On recovery, the inode will get truncated and the blocks will
3238 * be freed, so we have a strong guarantee that no future commit will
3239 * leave these blocks visible to the user.)
3240 *
3241 * Called with inode->sem down.
3242 */
3243int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3244{
3245	struct inode *inode = dentry->d_inode;
3246	int error, rc = 0;
3247	const unsigned int ia_valid = attr->ia_valid;
3248
3249	error = inode_change_ok(inode, attr);
3250	if (error)
3251		return error;
3252
3253	if (is_quota_modification(inode, attr))
3254		dquot_initialize(inode);
3255	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3256	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3257		handle_t *handle;
3258
3259		/* (user+group)*(old+new) structure, inode write (sb,
3260		 * inode block, ? - but truncate inode update has it) */
3261		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
3262					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
3263		if (IS_ERR(handle)) {
3264			error = PTR_ERR(handle);
3265			goto err_out;
3266		}
3267		error = dquot_transfer(inode, attr);
3268		if (error) {
3269			ext3_journal_stop(handle);
3270			return error;
3271		}
3272		/* Update corresponding info in inode so that everything is in
3273		 * one transaction */
3274		if (attr->ia_valid & ATTR_UID)
3275			inode->i_uid = attr->ia_uid;
3276		if (attr->ia_valid & ATTR_GID)
3277			inode->i_gid = attr->ia_gid;
3278		error = ext3_mark_inode_dirty(handle, inode);
3279		ext3_journal_stop(handle);
3280	}
3281
3282	if (attr->ia_valid & ATTR_SIZE)
3283		inode_dio_wait(inode);
3284
3285	if (S_ISREG(inode->i_mode) &&
3286	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3287		handle_t *handle;
3288
3289		handle = ext3_journal_start(inode, 3);
3290		if (IS_ERR(handle)) {
3291			error = PTR_ERR(handle);
3292			goto err_out;
3293		}
3294
3295		error = ext3_orphan_add(handle, inode);
3296		if (error) {
3297			ext3_journal_stop(handle);
3298			goto err_out;
3299		}
3300		EXT3_I(inode)->i_disksize = attr->ia_size;
3301		error = ext3_mark_inode_dirty(handle, inode);
3302		ext3_journal_stop(handle);
3303		if (error) {
3304			/* Some hard fs error must have happened. Bail out. */
3305			ext3_orphan_del(NULL, inode);
3306			goto err_out;
3307		}
3308		rc = ext3_block_truncate_page(inode, attr->ia_size);
3309		if (rc) {
3310			/* Cleanup orphan list and exit */
3311			handle = ext3_journal_start(inode, 3);
3312			if (IS_ERR(handle)) {
3313				ext3_orphan_del(NULL, inode);
3314				goto err_out;
3315			}
3316			ext3_orphan_del(handle, inode);
3317			ext3_journal_stop(handle);
3318			goto err_out;
3319		}
3320	}
3321
3322	if ((attr->ia_valid & ATTR_SIZE) &&
3323	    attr->ia_size != i_size_read(inode)) {
3324		truncate_setsize(inode, attr->ia_size);
3325		ext3_truncate(inode);
3326	}
3327
3328	setattr_copy(inode, attr);
3329	mark_inode_dirty(inode);
3330
3331	if (ia_valid & ATTR_MODE)
3332		rc = posix_acl_chmod(inode, inode->i_mode);
3333
3334err_out:
3335	ext3_std_error(inode->i_sb, error);
3336	if (!error)
3337		error = rc;
3338	return error;
3339}
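Editorial aside: the shrinking-truncate path of ext3_setattr() above is easiest to read as an ordered recipe; the outline below restates it (it is not additional code) to show why the orphan entry and i_disksize are committed before any pages or blocks are freed.

/*
 * Shrinking-truncate ordering in ext3_setattr(), restated as steps:
 *
 *  1. ext3_journal_start()        -- open a handle
 *  2. ext3_orphan_add()           -- record the inode so a crash mid-truncate
 *                                    is cleaned up by recovery
 *  3. i_disksize = new size       -- the on-disk size shrinks first
 *  4. ext3_mark_inode_dirty()     -- steps 2-3 land in one commit
 *  5. ext3_block_truncate_page()  -- zero the partial tail block
 *  6. truncate_setsize()          -- shrink i_size and the page cache
 *  7. ext3_truncate()             -- free the blocks, drop the orphan entry
 *
 * If the machine crashes after the commit of steps 2-4 but before step 7
 * completes, recovery replays the orphan list and finishes the truncate,
 * so no freed block can reappear inside the file.
 */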
3340
3341
3342/*
3343 * How many blocks doth make a writepage()?
3344 *
3345 * With N blocks per page, it may be:
3346 * N data blocks
3347 * 2 indirect blocks
3348 * 2 dindirect blocks
3349 * 1 tindirect block
3350 * N+5 bitmap blocks (from the above)
3351 * N+5 group descriptor summary blocks
3352 * 1 inode block
3353 * 1 superblock.
3354 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3355 *
3356 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3357 *
3358 * With ordered or writeback data it's the same, less the N data blocks.
3359 *
3360 * If the inode's direct blocks can hold an integral number of pages then a
3361 * page cannot straddle two indirect blocks, and we can only touch one indirect
3362 * and dindirect block, and the "5" above becomes "3".
3363 *
3364 * This still overestimates under most circumstances.  If we were to pass the
3365 * start and end offsets in here as well we could do block_to_path() on each
3366 * block and work out the exact number of indirects which are touched.  Pah.
3367 */
3368
3369static int ext3_writepage_trans_blocks(struct inode *inode)
3370{
3371	int bpp = ext3_journal_blocks_per_page(inode);
3372	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3373	int ret;
3374
3375	if (ext3_should_journal_data(inode))
3376		ret = 3 * (bpp + indirects) + 2;
3377	else
3378		ret = 2 * (bpp + indirects) + indirects + 2;
3379
3380#ifdef CONFIG_QUOTA
3381	/* We know the quota structures were already allocated during
3382	 * dquot_initialize(), so we will only be updating the data blocks + inodes */
3383	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3384#endif
3385
3386	return ret;
3387}
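Editorial aside: as a worked instance of the estimate above, assume 4K pages and 1K blocks, so bpp = 4 and EXT3_NDIR_BLOCKS (12) divides evenly, giving indirects = 3. The snippet mirrors the arithmetic of ext3_writepage_trans_blocks():

#include <stdio.h>

int main(void)
{
	int bpp = 4;				/* assumed: 4K pages, 1K blocks */
	int indirects = (12 % bpp) ? 5 : 3;	/* 12 = EXT3_NDIR_BLOCKS */

	/* data=journal: the data blocks are journaled too */
	printf("journal : %d\n", 3 * (bpp + indirects) + 2);			/* 23 */
	/* data=ordered/writeback */
	printf("ordered : %d\n", 2 * (bpp + indirects) + indirects + 2);	/* 19 */

	/* CONFIG_QUOTA adds EXT3_MAXQUOTAS_TRANS_BLOCKS on top of either figure */
	return 0;
}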
3388
3389/*
3390 * The caller must have previously called ext3_reserve_inode_write().
3391 * Given this, we know that the caller already has write access to iloc->bh.
3392 */
3393int ext3_mark_iloc_dirty(handle_t *handle,
3394		struct inode *inode, struct ext3_iloc *iloc)
3395{
3396	int err = 0;
3397
3398	/* the do_update_inode consumes one bh->b_count */
3399	get_bh(iloc->bh);
3400
3401	/* ext3_do_update_inode() does journal_dirty_metadata */
3402	err = ext3_do_update_inode(handle, inode, iloc);
3403	put_bh(iloc->bh);
3404	return err;
3405}
3406
3407/*
3408 * On success, we end up with an outstanding reference count against
3409 * iloc->bh.  This _must_ be cleaned up later.
3410 */
3411
3412int
3413ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3414			 struct ext3_iloc *iloc)
3415{
3416	int err = 0;
3417	if (handle) {
3418		err = ext3_get_inode_loc(inode, iloc);
3419		if (!err) {
3420			BUFFER_TRACE(iloc->bh, "get_write_access");
3421			err = ext3_journal_get_write_access(handle, iloc->bh);
3422			if (err) {
3423				brelse(iloc->bh);
3424				iloc->bh = NULL;
3425			}
3426		}
3427	}
3428	ext3_std_error(inode->i_sb, err);
3429	return err;
3430}
3431
3432/*
3433 * What we do here is to mark the in-core inode as clean with respect to inode
3434 * dirtiness (it may still be data-dirty).
3435 * This means that the in-core inode may be reaped by prune_icache
3436 * without having to perform any I/O.  This is a very good thing,
3437 * because *any* task may call prune_icache - even ones which
3438 * have a transaction open against a different journal.
3439 *
3440 * Is this cheating?  Not really.  Sure, we haven't written the
3441 * inode out, but prune_icache isn't a user-visible syncing function.
3442 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3443 * we start and wait on commits.
3444 */
3445int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3446{
3447	struct ext3_iloc iloc;
3448	int err;
3449
3450	might_sleep();
3451	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
3452	err = ext3_reserve_inode_write(handle, inode, &iloc);
3453	if (!err)
3454		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3455	return err;
3456}
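Editorial aside: ext3_reserve_inode_write() plus ext3_mark_iloc_dirty(), wrapped up by ext3_mark_inode_dirty() above, form the usual pattern for metadata updates in this file. A hypothetical caller, sketched only to show the shape of the sequence (the name and the credit count of 2 are examples, not code from the file):

/* Hypothetical helper illustrating the usual update pattern. */
static int example_touch_inode(struct inode *inode)
{
	handle_t *handle = ext3_journal_start(inode, 2);	/* 2 credits: example only */
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... modify in-core inode fields under the running handle ... */

	err = ext3_mark_inode_dirty(handle, inode);	/* reserve + copy to the bh */
	ext3_journal_stop(handle);
	return err;
}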
3457
3458/*
3459 * ext3_dirty_inode() is called from __mark_inode_dirty()
3460 *
3461 * We're really interested in the case where a file is being extended.
3462 * i_size has been changed by generic_commit_write() and we thus need
3463 * to include the updated inode in the current transaction.
3464 *
3465 * Also, dquot_alloc_space() will always dirty the inode when blocks
3466 * are allocated to the file.
3467 *
3468 * If the inode is marked synchronous, we don't honour that here - doing
3469 * so would cause a commit on atime updates, which we don't bother doing.
3470 * We handle synchronous inodes at the highest possible level.
3471 */
3472void ext3_dirty_inode(struct inode *inode, int flags)
3473{
3474	handle_t *current_handle = ext3_journal_current_handle();
3475	handle_t *handle;
3476
3477	handle = ext3_journal_start(inode, 2);
3478	if (IS_ERR(handle))
3479		goto out;
3480	if (current_handle &&
3481		current_handle->h_transaction != handle->h_transaction) {
3482		/* This task has a transaction open against a different fs */
3483		printk(KERN_EMERG "%s: transactions do not match!\n",
3484		       __func__);
3485	} else {
3486		jbd_debug(5, "marking dirty.  outer handle=%p\n",
3487				current_handle);
3488		ext3_mark_inode_dirty(handle, inode);
3489	}
3490	ext3_journal_stop(handle);
3491out:
3492	return;
3493}
3494
3495#if 0
3496/*
3497 * Bind an inode's backing buffer_head into this transaction, to prevent
3498 * it from being flushed to disk early.  Unlike
3499 * ext3_reserve_inode_write, this leaves behind no bh reference and
3500 * returns no iloc structure, so the caller needs to repeat the iloc
3501 * lookup to mark the inode dirty later.
3502 */
3503static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3504{
3505	struct ext3_iloc iloc;
3506
3507	int err = 0;
3508	if (handle) {
3509		err = ext3_get_inode_loc(inode, &iloc);
3510		if (!err) {
3511			BUFFER_TRACE(iloc.bh, "get_write_access");
3512			err = journal_get_write_access(handle, iloc.bh);
3513			if (!err)
3514				err = ext3_journal_dirty_metadata(handle,
3515								  iloc.bh);
3516			brelse(iloc.bh);
3517		}
3518	}
3519	ext3_std_error(inode->i_sb, err);
3520	return err;
3521}
3522#endif
3523
3524int ext3_change_inode_journal_flag(struct inode *inode, int val)
3525{
3526	journal_t *journal;
3527	handle_t *handle;
3528	int err;
3529
3530	/*
3531	 * We have to be very careful here: changing a data block's
3532	 * journaling status dynamically is dangerous.  If we write a
3533	 * data block to the journal, change the status and then delete
3534	 * that block, we risk forgetting to revoke the old log record
3535	 * from the journal and so a subsequent replay can corrupt data.
3536	 * So, first we make sure that the journal is empty and that
3537	 * nobody is changing anything.
3538	 */
3539
3540	journal = EXT3_JOURNAL(inode);
3541	if (is_journal_aborted(journal))
3542		return -EROFS;
3543
3544	journal_lock_updates(journal);
3545	journal_flush(journal);
3546
3547	/*
3548	 * OK, there are no updates running now, and all cached data is
3549	 * synced to disk.  We are now in a completely consistent state
3550	 * which doesn't have anything in the journal, and we know that
3551	 * no filesystem updates are running, so it is safe to modify
3552	 * the inode's in-core data-journaling state flag now.
3553	 */
3554
3555	if (val)
3556		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3557	else
3558		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3559	ext3_set_aops(inode);
3560
3561	journal_unlock_updates(journal);
3562
3563	/* Finally we can mark the inode as dirty. */
3564
3565	handle = ext3_journal_start(inode, 1);
3566	if (IS_ERR(handle))
3567		return PTR_ERR(handle);
3568
3569	err = ext3_mark_inode_dirty(handle, inode);
3570	handle->h_sync = 1;
3571	ext3_journal_stop(handle);
3572	ext3_std_error(inode->i_sb, err);
3573
3574	return err;
3575}