   1/*
   2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
   3 */
   4
   5#include <linux/time.h>
   6#include <linux/fs.h>
   7#include <linux/reiserfs_fs.h>
   8#include <linux/reiserfs_acl.h>
   9#include <linux/reiserfs_xattr.h>
  10#include <linux/exportfs.h>
  11#include <linux/pagemap.h>
  12#include <linux/highmem.h>
  13#include <linux/slab.h>
  14#include <asm/uaccess.h>
  15#include <asm/unaligned.h>
  16#include <linux/buffer_head.h>
  17#include <linux/mpage.h>
  18#include <linux/writeback.h>
  19#include <linux/quotaops.h>
  20#include <linux/swap.h>
  21
  22int reiserfs_commit_write(struct file *f, struct page *page,
  23			  unsigned from, unsigned to);
  24
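/* reiserfs_evict_inode() is reiserfs's ->evict_inode operation: when the
 * last link and the last reference to the inode are gone, it deletes the
 * on-disk object inside a journal transaction; otherwise it just tears
 * down the in-core inode. */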
  25void reiserfs_evict_inode(struct inode *inode)
  26{
  27	/* We need blocks for transaction + (user+group) quota update (possibly delete) */
  28	int jbegin_count =
  29	    JOURNAL_PER_BALANCE_CNT * 2 +
  30	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
  31	struct reiserfs_transaction_handle th;
  32	int depth;
  33	int err;
  34
  35	if (!inode->i_nlink && !is_bad_inode(inode))
  36		dquot_initialize(inode);
  37
  38	truncate_inode_pages(&inode->i_data, 0);
  39	if (inode->i_nlink)
  40		goto no_delete;
  41
  42	depth = reiserfs_write_lock_once(inode->i_sb);
  43
   44	/* An objectid of zero happens when we abort creating a new inode for some reason, such as lack of space. */
  45	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {	/* also handles bad_inode case */
  46		reiserfs_delete_xattrs(inode);
  47
  48		if (journal_begin(&th, inode->i_sb, jbegin_count))
  49			goto out;
  50		reiserfs_update_inode_transaction(inode);
  51
  52		reiserfs_discard_prealloc(&th, inode);
  53
  54		err = reiserfs_delete_object(&th, inode);
  55
  56		/* Do quota update inside a transaction for journaled quotas. We must do that
  57		 * after delete_object so that quota updates go into the same transaction as
  58		 * stat data deletion */
   59		if (!err)
  60			dquot_free_inode(inode);
  61
  62		if (journal_end(&th, inode->i_sb, jbegin_count))
  63			goto out;
  64
  65		/* check return value from reiserfs_delete_object after
  66		 * ending the transaction
  67		 */
  68		if (err)
  69		    goto out;
  70
  71		/* all items of file are deleted, so we can remove "save" link */
  72		remove_save_link(inode, 0 /* not truncate */ );	/* we can't do anything
  73								 * about an error here */
  74	} else {
  75		/* no object items are in the tree */
  76		;
  77	}
  78      out:
  79	end_writeback(inode);	/* note this must go after the journal_end to prevent deadlock */
  80	dquot_drop(inode);
  81	inode->i_blocks = 0;
  82	reiserfs_write_unlock_once(inode->i_sb, depth);
  83	return;
  84
  85no_delete:
  86	end_writeback(inode);
  87	dquot_drop(inode);
  88}
  89
  90static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
  91			  __u32 objectid, loff_t offset, int type, int length)
  92{
  93	key->version = version;
  94
  95	key->on_disk_key.k_dir_id = dirid;
  96	key->on_disk_key.k_objectid = objectid;
  97	set_cpu_key_k_offset(key, offset);
  98	set_cpu_key_k_type(key, type);
  99	key->key_length = length;
 100}
 101
  102/* take the base of inode_key (dirid, objectid; it always comes from the
  103   inode) and the key version from the inode, then set the offset and type */
 104void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
 105		  int type, int length)
 106{
 107	_make_cpu_key(key, get_inode_item_key_version(inode),
 108		      le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
 109		      le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
 110		      length);
 111}
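/* For example, to look up the byte at file position 'pos' one builds a key
 * with a 1-based offset, as _get_block_create_0() below does:
 *	make_cpu_key(&key, inode, pos + 1, TYPE_ANY, 3);
 */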
 112
 113//
  114// when key is NULL, do not set the version and the short key (dirid, objectid)
 115//
 116inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
 117			      int version,
 118			      loff_t offset, int type, int length,
 119			      int entry_count /*or ih_free_space */ )
 120{
 121	if (key) {
 122		ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
 123		ih->ih_key.k_objectid =
 124		    cpu_to_le32(key->on_disk_key.k_objectid);
 125	}
 126	put_ih_version(ih, version);
 127	set_le_ih_k_offset(ih, offset);
 128	set_le_ih_k_type(ih, type);
 129	put_ih_item_len(ih, length);
 130	/*    set_ih_free_space (ih, 0); */
  131	// for directory items this is the entry count; for direct and stat
  132	// data items it is 0xffff, and for indirect items it is 0
 133	put_ih_entry_count(ih, entry_count);
 134}
 135
 136//
 137// FIXME: we might cache recently accessed indirect item
 138
 139// Ugh.  Not too eager for that....
 140//  I cut the code until such time as I see a convincing argument (benchmark).
 141// I don't want a bloated inode struct..., and I don't like code complexity....
 142
 143/* cutting the code is fine, since it really isn't in use yet and is easy
 144** to add back in.  But, Vladimir has a really good idea here.  Think
 145** about what happens for reading a file.  For each page,
 146** The VFS layer calls reiserfs_readpage, who searches the tree to find
 147** an indirect item.  This indirect item has X number of pointers, where
 148** X is a big number if we've done the block allocation right.  But,
 149** we only use one or two of these pointers during each call to readpage,
  150** needlessly re-searching the tree later on.
 151**
 152** The size of the cache could be dynamic based on the size of the file.
 153**
  154** I'd also like to see us cache the location of the stat data item, since
 155** we are needlessly researching for that frequently.
 156**
 157** --chris
 158*/
 159
 160/* If this page has a file tail in it, and
 161** it was read in by get_block_create_0, the page data is valid,
 162** but tail is still sitting in a direct item, and we can't write to
  163** it.  So, look through this page, and check all the mapped buffers
  164** to make sure they have valid block numbers.  Any that don't must
  165** be unmapped, so that __block_write_begin will correctly call
 166** reiserfs_get_block to convert the tail into an unformatted node
 167*/
 168static inline void fix_tail_page_for_writing(struct page *page)
 169{
 170	struct buffer_head *head, *next, *bh;
 171
 172	if (page && page_has_buffers(page)) {
 173		head = page_buffers(page);
 174		bh = head;
 175		do {
 176			next = bh->b_this_page;
 177			if (buffer_mapped(bh) && bh->b_blocknr == 0) {
 178				reiserfs_unmap_buffer(bh);
 179			}
 180			bh = next;
 181		} while (bh != head);
 182	}
 183}
 184
  185/* reiserfs_get_block does not need to allocate a block if one has already
  186   been allocated or a non-hole position has been found in the indirect item */
 187static inline int allocation_needed(int retval, b_blocknr_t allocated,
 188				    struct item_head *ih,
 189				    __le32 * item, int pos_in_item)
 190{
 191	if (allocated)
 192		return 0;
 193	if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
 194	    get_block_num(item, pos_in_item))
 195		return 0;
 196	return 1;
 197}
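/* i.e. allocation is needed only when nothing has been allocated yet and
 * the search did not land on a nonzero pointer inside an indirect item. */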
 198
 199static inline int indirect_item_found(int retval, struct item_head *ih)
 200{
 201	return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
 202}
 203
 204static inline void set_block_dev_mapped(struct buffer_head *bh,
 205					b_blocknr_t block, struct inode *inode)
 206{
 207	map_bh(bh, inode->i_sb, block);
 208}
 209
 210//
  211// files which were created in the earlier (3.5) format cannot be
  212// larger than 2 GB
 213//
 214static int file_capable(struct inode *inode, sector_t block)
 215{
  216	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||	// it is a new file.
  217	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))	// old file, but 'block' is within the 2 GB limit
 218		return 1;
 219
 220	return 0;
 221}
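/* For example, with a 4 KiB block size (s_blocksize_bits == 12) the cutoff
 * above is 1 << (31 - 12) == 524288 blocks, i.e. exactly 2 GiB. */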
 222
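/* End the running transaction (updating the stat data first) and begin a
 * fresh one.  This is a no-op when the handle is nested (t_refcount > 1),
 * since only the outermost holder may end a transaction. */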
 223static int restart_transaction(struct reiserfs_transaction_handle *th,
 224			       struct inode *inode, struct treepath *path)
 225{
 226	struct super_block *s = th->t_super;
 227	int len = th->t_blocks_allocated;
 228	int err;
 229
 230	BUG_ON(!th->t_trans_id);
 231	BUG_ON(!th->t_refcount);
 232
 233	pathrelse(path);
 234
 235	/* we cannot restart while nested */
 236	if (th->t_refcount > 1) {
 237		return 0;
 238	}
 239	reiserfs_update_sd(th, inode);
 240	err = journal_end(th, s, len);
 241	if (!err) {
 242		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
 243		if (!err)
 244			reiserfs_update_inode_transaction(inode);
 245	}
 246	return err;
 247}
 248
  249// Called by reiserfs_get_block when create == 0. Returns the block number
  250// for the 'block'-th logical block of the file. When it hits a direct
  251// item, it either returns 0 (when called from bmap) or reads the direct
  252// item into a piece of the page (bh_result).
 256
 257static int _get_block_create_0(struct inode *inode, sector_t block,
 258			       struct buffer_head *bh_result, int args)
 259{
 260	INITIALIZE_PATH(path);
 261	struct cpu_key key;
 262	struct buffer_head *bh;
 263	struct item_head *ih, tmp_ih;
 264	b_blocknr_t blocknr;
 265	char *p = NULL;
 266	int chars;
 267	int ret;
 268	int result;
 269	int done = 0;
 270	unsigned long offset;
 271
 272	// prepare the key to look for the 'block'-th block of file
 273	make_cpu_key(&key, inode,
 274		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
 275		     3);
 276
 277	result = search_for_position_by_key(inode->i_sb, &key, &path);
 278	if (result != POSITION_FOUND) {
 279		pathrelse(&path);
 280		if (p)
 281			kunmap(bh_result->b_page);
 282		if (result == IO_ERROR)
 283			return -EIO;
  284		// We do not return -ENOENT if there is a hole but the page is uptodate,
  285		// because that means mmapped data associated with it has yet to be written to disk.
 286		if ((args & GET_BLOCK_NO_HOLE)
 287		    && !PageUptodate(bh_result->b_page)) {
 288			return -ENOENT;
 289		}
 290		return 0;
 291	}
 292	//
 293	bh = get_last_bh(&path);
 294	ih = get_ih(&path);
 295	if (is_indirect_le_ih(ih)) {
 296		__le32 *ind_item = (__le32 *) B_I_PITEM(bh, ih);
 297
 298		/* FIXME: here we could cache indirect item or part of it in
 299		   the inode to avoid search_by_key in case of subsequent
 300		   access to file */
 301		blocknr = get_block_num(ind_item, path.pos_in_item);
 302		ret = 0;
 303		if (blocknr) {
 304			map_bh(bh_result, inode->i_sb, blocknr);
 305			if (path.pos_in_item ==
 306			    ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
 307				set_buffer_boundary(bh_result);
 308			}
 309		} else
  310			// We do not return -ENOENT if there is a hole but the page is uptodate,
  311			// because that means mmapped data associated with it has yet to be written to disk.
 312		if ((args & GET_BLOCK_NO_HOLE)
 313			    && !PageUptodate(bh_result->b_page)) {
 314			ret = -ENOENT;
 315		}
 316
 317		pathrelse(&path);
 318		if (p)
 319			kunmap(bh_result->b_page);
 320		return ret;
 321	}
 322	// requested data are in direct item(s)
 323	if (!(args & GET_BLOCK_READ_DIRECT)) {
  324		// we are called by bmap. FIXME: we cannot map a block of the file
  325		// when it is stored in direct item(s)
 326		pathrelse(&path);
 327		if (p)
 328			kunmap(bh_result->b_page);
 329		return -ENOENT;
 330	}
 331
 332	/* if we've got a direct item, and the buffer or page was uptodate,
 333	 ** we don't want to pull data off disk again.  skip to the
 334	 ** end, where we map the buffer and return
 335	 */
 336	if (buffer_uptodate(bh_result)) {
 337		goto finished;
 338	} else
 339		/*
 340		 ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
  341		 ** pages without any buffers.  If the page is up to date, we don't want
  342		 ** to read old data off disk.  Set the up to date bit on the buffer instead
 343		 ** and jump to the end
 344		 */
 345	if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
 346		set_buffer_uptodate(bh_result);
 347		goto finished;
 348	}
 349	// read file tail into part of page
 350	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
 351	copy_item_head(&tmp_ih, ih);
 352
 353	/* we only want to kmap if we are reading the tail into the page.
 354	 ** this is not the common case, so we don't kmap until we are
 355	 ** sure we need to.  But, this means the item might move if
 356	 ** kmap schedules
 357	 */
 358	if (!p)
 359		p = (char *)kmap(bh_result->b_page);
 360
 361	p += offset;
 362	memset(p, 0, inode->i_sb->s_blocksize);
 363	do {
 364		if (!is_direct_le_ih(ih)) {
 365			BUG();
 366		}
 367		/* make sure we don't read more bytes than actually exist in
 368		 ** the file.  This can happen in odd cases where i_size isn't
 369		 ** correct, and when direct item padding results in a few
 370		 ** extra bytes at the end of the direct item
 371		 */
 372		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
 373			break;
 374		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
 375			chars =
 376			    inode->i_size - (le_ih_k_offset(ih) - 1) -
 377			    path.pos_in_item;
 378			done = 1;
 379		} else {
 380			chars = ih_item_len(ih) - path.pos_in_item;
 381		}
 382		memcpy(p, B_I_PITEM(bh, ih) + path.pos_in_item, chars);
 383
 384		if (done)
 385			break;
 386
 387		p += chars;
 388
 389		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
  390			// we are done if the direct item just read is not the last
  391			// item of the node. FIXME: we could try to check the right
  392			// delimiting key to see whether the direct item continues in
  393			// the right neighbor, or rely on i_size
 394			break;
 395
 396		// update key to look for the next piece
 397		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
 398		result = search_for_position_by_key(inode->i_sb, &key, &path);
 399		if (result != POSITION_FOUND)
 400			// i/o error most likely
 401			break;
 402		bh = get_last_bh(&path);
 403		ih = get_ih(&path);
 404	} while (1);
 405
 406	flush_dcache_page(bh_result->b_page);
 407	kunmap(bh_result->b_page);
 408
 409      finished:
 410	pathrelse(&path);
 411
 412	if (result == IO_ERROR)
 413		return -EIO;
 414
 415	/* this buffer has valid data, but isn't valid for io.  mapping it to
 416	 * block #0 tells the rest of reiserfs it just has a tail in it
 417	 */
 418	map_bh(bh_result, inode->i_sb, 0);
 419	set_buffer_uptodate(bh_result);
 420	return 0;
 421}
 422
  423// this is called to create the file map, so _get_block_create_0 will
  424// not read a direct item
 425static int reiserfs_bmap(struct inode *inode, sector_t block,
 426			 struct buffer_head *bh_result, int create)
 427{
 428	if (!file_capable(inode, block))
 429		return -EFBIG;
 430
 431	reiserfs_write_lock(inode->i_sb);
 432	/* do not read the direct item */
 433	_get_block_create_0(inode, block, bh_result, 0);
 434	reiserfs_write_unlock(inode->i_sb);
 435	return 0;
 436}
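/* Note: for data still stored in direct items, _get_block_create_0 returns
 * -ENOENT and leaves bh_result unmapped, so bmap reports no physical block
 * for those offsets; the return value is deliberately ignored above. */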
 437
 438/* special version of get_block that is only used by grab_tail_page right
 439** now.  It is sent to __block_write_begin, and when you try to get a
 440** block past the end of the file (or a block from a hole) it returns
 441** -ENOENT instead of a valid buffer.  __block_write_begin expects to
 442** be able to do i/o on the buffers returned, unless an error value
 443** is also returned.
 444**
 445** So, this allows __block_write_begin to be used for reading a single block
  446** in a page, where it does not produce a valid page for holes or past the
 447** end of the file.  This turns out to be exactly what we need for reading
 448** tails for conversion.
 449**
 450** The point of the wrapper is forcing a certain value for create, even
 451** though the VFS layer is calling this function with create==1.  If you
 452** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 453** don't use this function.
 454*/
 455static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
 456				       struct buffer_head *bh_result,
 457				       int create)
 458{
 459	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
 460}
 461
  462/* This is a special helper for reiserfs_get_block in case we are executing
  463   a direct_IO request. */
 464static int reiserfs_get_blocks_direct_io(struct inode *inode,
 465					 sector_t iblock,
 466					 struct buffer_head *bh_result,
 467					 int create)
 468{
 469	int ret;
 470
 471	bh_result->b_page = NULL;
 472
 473	/* We set the b_size before reiserfs_get_block call since it is
 474	   referenced in convert_tail_for_hole() that may be called from
 475	   reiserfs_get_block() */
 476	bh_result->b_size = (1 << inode->i_blkbits);
 477
 478	ret = reiserfs_get_block(inode, iblock, bh_result,
 479				 create | GET_BLOCK_NO_DANGLE);
 480	if (ret)
 481		goto out;
 482
 483	/* don't allow direct io onto tail pages */
 484	if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
 485		/* make sure future calls to the direct io funcs for this offset
 486		 ** in the file fail by unmapping the buffer
 487		 */
 488		clear_buffer_mapped(bh_result);
 489		ret = -EINVAL;
 490	}
  491	/* Possibly an unpacked tail. Flush the data before the pages
  492	   have disappeared */
 493	if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
 494		int err;
 495
 496		reiserfs_write_lock(inode->i_sb);
 497
 498		err = reiserfs_commit_for_inode(inode);
 499		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
 500
 501		reiserfs_write_unlock(inode->i_sb);
 502
 503		if (err < 0)
 504			ret = err;
 505	}
 506      out:
 507	return ret;
 508}
 509
 510/*
 511** helper function for when reiserfs_get_block is called for a hole
 512** but the file tail is still in a direct item
 513** bh_result is the buffer head for the hole
 514** tail_offset is the offset of the start of the tail in the file
 515**
  516** This calls prepare_write, which will start a new transaction;
  517** you should not be in a transaction, or have any paths held, when you
 518** call this.
 519*/
 520static int convert_tail_for_hole(struct inode *inode,
 521				 struct buffer_head *bh_result,
 522				 loff_t tail_offset)
 523{
 524	unsigned long index;
 525	unsigned long tail_end;
 526	unsigned long tail_start;
 527	struct page *tail_page;
 528	struct page *hole_page = bh_result->b_page;
 529	int retval = 0;
 530
 531	if ((tail_offset & (bh_result->b_size - 1)) != 1)
 532		return -EIO;
 533
 534	/* always try to read until the end of the block */
 535	tail_start = tail_offset & (PAGE_CACHE_SIZE - 1);
 536	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
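	/* e.g. with a 4 KiB block and tail_start == 5, tail_end becomes
	 * (5 | 4095) + 1 == 4096, the end of the containing block */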
 537
 538	index = tail_offset >> PAGE_CACHE_SHIFT;
  539	/* hole_page can be NULL in case of direct_io; we are sure
  540	   that we cannot get here if we write with O_DIRECT into a
  541	   tail page */
 542	if (!hole_page || index != hole_page->index) {
 543		tail_page = grab_cache_page(inode->i_mapping, index);
 544		retval = -ENOMEM;
 545		if (!tail_page) {
 546			goto out;
 547		}
 548	} else {
 549		tail_page = hole_page;
 550	}
 551
 552	/* we don't have to make sure the conversion did not happen while
 553	 ** we were locking the page because anyone that could convert
 554	 ** must first take i_mutex.
 555	 **
 556	 ** We must fix the tail page for writing because it might have buffers
 557	 ** that are mapped, but have a block number of 0.  This indicates tail
 558	 ** data that has been read directly into the page, and
 559	 ** __block_write_begin won't trigger a get_block in this case.
 560	 */
 561	fix_tail_page_for_writing(tail_page);
 562	retval = __reiserfs_write_begin(tail_page, tail_start,
 563				      tail_end - tail_start);
 564	if (retval)
 565		goto unlock;
 566
 567	/* tail conversion might change the data in the page */
 568	flush_dcache_page(tail_page);
 569
 570	retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);
 571
 572      unlock:
 573	if (tail_page != hole_page) {
 574		unlock_page(tail_page);
 575		page_cache_release(tail_page);
 576	}
 577      out:
 578	return retval;
 579}
 580
 581static inline int _allocate_block(struct reiserfs_transaction_handle *th,
 582				  sector_t block,
 583				  struct inode *inode,
 584				  b_blocknr_t * allocated_block_nr,
 585				  struct treepath *path, int flags)
 586{
 587	BUG_ON(!th->t_trans_id);
 588
 589#ifdef REISERFS_PREALLOCATE
 590	if (!(flags & GET_BLOCK_NO_IMUX)) {
 591		return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
 592						  path, block);
 593	}
 594#endif
 595	return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
 596					 block);
 597}
 598
 599int reiserfs_get_block(struct inode *inode, sector_t block,
 600		       struct buffer_head *bh_result, int create)
 601{
 602	int repeat, retval = 0;
 603	b_blocknr_t allocated_block_nr = 0;	// b_blocknr_t is (unsigned) 32 bit int
 604	INITIALIZE_PATH(path);
 605	int pos_in_item;
 606	struct cpu_key key;
 607	struct buffer_head *bh, *unbh = NULL;
 608	struct item_head *ih, tmp_ih;
 609	__le32 *item;
 610	int done;
 611	int fs_gen;
 612	int lock_depth;
 613	struct reiserfs_transaction_handle *th = NULL;
 614	/* space reserved in transaction batch:
 615	   . 3 balancings in direct->indirect conversion
  616	   . 1 block involved in reiserfs_update_sd()
 617	   XXX in practically impossible worst case direct2indirect()
 618	   can incur (much) more than 3 balancings.
 619	   quota update for user, group */
 620	int jbegin_count =
 621	    JOURNAL_PER_BALANCE_CNT * 3 + 1 +
 622	    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
 623	int version;
 624	int dangle = 1;
 625	loff_t new_offset =
 626	    (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
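	/* new_offset is the (1-based) byte offset of the first byte of the
	 * requested block; reiserfs key offsets start at 1 */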
 627
 628	lock_depth = reiserfs_write_lock_once(inode->i_sb);
 629	version = get_inode_item_key_version(inode);
 630
 631	if (!file_capable(inode, block)) {
 632		reiserfs_write_unlock_once(inode->i_sb, lock_depth);
 633		return -EFBIG;
 634	}
 635
 636	/* if !create, we aren't changing the FS, so we don't need to
 637	 ** log anything, so we don't need to start a transaction
 638	 */
 639	if (!(create & GET_BLOCK_CREATE)) {
 640		int ret;
 641		/* find number of block-th logical block of the file */
 642		ret = _get_block_create_0(inode, block, bh_result,
 643					  create | GET_BLOCK_READ_DIRECT);
 644		reiserfs_write_unlock_once(inode->i_sb, lock_depth);
 645		return ret;
 646	}
 647	/*
 648	 * if we're already in a transaction, make sure to close
 649	 * any new transactions we start in this func
 650	 */
 651	if ((create & GET_BLOCK_NO_DANGLE) ||
 652	    reiserfs_transaction_running(inode->i_sb))
 653		dangle = 0;
 654
  655	/* If the file is of such a size that it might have a tail, and tails are
  656	 ** enabled, we should mark it as possibly needing tail packing on close
  657	 */
 658	if ((have_large_tails(inode->i_sb)
 659	     && inode->i_size < i_block_size(inode) * 4)
 660	    || (have_small_tails(inode->i_sb)
 661		&& inode->i_size < i_block_size(inode)))
 662		REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
 663
 664	/* set the key of the first byte in the 'block'-th block of file */
 665	make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ );
 666	if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
 667	      start_trans:
 668		th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
 669		if (!th) {
 670			retval = -ENOMEM;
 671			goto failure;
 672		}
 673		reiserfs_update_inode_transaction(inode);
 674	}
 675      research:
 676
 677	retval = search_for_position_by_key(inode->i_sb, &key, &path);
 678	if (retval == IO_ERROR) {
 679		retval = -EIO;
 680		goto failure;
 681	}
 682
 683	bh = get_last_bh(&path);
 684	ih = get_ih(&path);
 685	item = get_item(&path);
 686	pos_in_item = path.pos_in_item;
 687
 688	fs_gen = get_generation(inode->i_sb);
 689	copy_item_head(&tmp_ih, ih);
 690
 691	if (allocation_needed
 692	    (retval, allocated_block_nr, ih, item, pos_in_item)) {
 693		/* we have to allocate block for the unformatted node */
 694		if (!th) {
 695			pathrelse(&path);
 696			goto start_trans;
 697		}
 698
 699		repeat =
 700		    _allocate_block(th, block, inode, &allocated_block_nr,
 701				    &path, create);
 702
 703		if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
 704			/* restart the transaction to give the journal a chance to free
 705			 ** some blocks.  releases the path, so we have to go back to
 706			 ** research if we succeed on the second try
 707			 */
 708			SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
 709			retval = restart_transaction(th, inode, &path);
 710			if (retval)
 711				goto failure;
 712			repeat =
 713			    _allocate_block(th, block, inode,
 714					    &allocated_block_nr, NULL, create);
 715
 716			if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
 717				goto research;
 718			}
 719			if (repeat == QUOTA_EXCEEDED)
 720				retval = -EDQUOT;
 721			else
 722				retval = -ENOSPC;
 723			goto failure;
 724		}
 725
 726		if (fs_changed(fs_gen, inode->i_sb)
 727		    && item_moved(&tmp_ih, &path)) {
 728			goto research;
 729		}
 730	}
 731
 732	if (indirect_item_found(retval, ih)) {
 733		b_blocknr_t unfm_ptr;
  734		/* the 'block'-th block is in the file already (there is a
  735		   corresponding cell in some indirect item), but it may be a
  736		   zero unformatted node pointer (a hole) */
 737		unfm_ptr = get_block_num(item, pos_in_item);
 738		if (unfm_ptr == 0) {
 739			/* use allocated block to plug the hole */
 740			reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
 741			if (fs_changed(fs_gen, inode->i_sb)
 742			    && item_moved(&tmp_ih, &path)) {
 743				reiserfs_restore_prepared_buffer(inode->i_sb,
 744								 bh);
 745				goto research;
 746			}
 747			set_buffer_new(bh_result);
 748			if (buffer_dirty(bh_result)
 749			    && reiserfs_data_ordered(inode->i_sb))
 750				reiserfs_add_ordered_list(inode, bh_result);
 751			put_block_num(item, pos_in_item, allocated_block_nr);
 752			unfm_ptr = allocated_block_nr;
 753			journal_mark_dirty(th, inode->i_sb, bh);
 754			reiserfs_update_sd(th, inode);
 755		}
 756		set_block_dev_mapped(bh_result, unfm_ptr, inode);
 757		pathrelse(&path);
 758		retval = 0;
 759		if (!dangle && th)
 760			retval = reiserfs_end_persistent_transaction(th);
 761
 762		reiserfs_write_unlock_once(inode->i_sb, lock_depth);
 763
  764		/* the item was found, so new blocks were not added to the file;
  765		 ** there is no need to make sure the inode is updated with this
  766		 ** transaction
 767		 */
 768		return retval;
 769	}
 770
 771	if (!th) {
 772		pathrelse(&path);
 773		goto start_trans;
 774	}
 775
  776	/* the desired position was not found, or is in a direct item. We have
  777	   to append the file with holes up to the 'block'-th block, converting
  778	   direct items to an indirect one if necessary */
 779	done = 0;
 780	do {
 781		if (is_statdata_le_ih(ih)) {
 782			__le32 unp = 0;
 783			struct cpu_key tmp_key;
 784
 785			/* indirect item has to be inserted */
 786			make_le_item_head(&tmp_ih, &key, version, 1,
 787					  TYPE_INDIRECT, UNFM_P_SIZE,
 788					  0 /* free_space */ );
 789
 790			if (cpu_key_k_offset(&key) == 1) {
 791				/* we are going to add 'block'-th block to the file. Use
 792				   allocated block for that */
 793				unp = cpu_to_le32(allocated_block_nr);
 794				set_block_dev_mapped(bh_result,
 795						     allocated_block_nr, inode);
 796				set_buffer_new(bh_result);
 797				done = 1;
 798			}
 799			tmp_key = key;	// ;)
 800			set_cpu_key_k_offset(&tmp_key, 1);
 801			PATH_LAST_POSITION(&path)++;
 802
 803			retval =
 804			    reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih,
 805						 inode, (char *)&unp);
 806			if (retval) {
 807				reiserfs_free_block(th, inode,
 808						    allocated_block_nr, 1);
 809				goto failure;	// retval == -ENOSPC, -EDQUOT or -EIO or -EEXIST
 810			}
 811			//mark_tail_converted (inode);
 812		} else if (is_direct_le_ih(ih)) {
 813			/* direct item has to be converted */
 814			loff_t tail_offset;
 815
 816			tail_offset =
 817			    ((le_ih_k_offset(ih) -
 818			      1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
 819			if (tail_offset == cpu_key_k_offset(&key)) {
 820				/* direct item we just found fits into block we have
 821				   to map. Convert it into unformatted node: use
 822				   bh_result for the conversion */
 823				set_block_dev_mapped(bh_result,
 824						     allocated_block_nr, inode);
 825				unbh = bh_result;
 826				done = 1;
 827			} else {
  828				/* we have to pad the file tail stored in direct item(s)
  829				   up to the block size and convert it to an unformatted
  830				   node. FIXME: this should also get into the page cache */
 831
 832				pathrelse(&path);
 833				/*
 834				 * ugly, but we can only end the transaction if
 835				 * we aren't nested
 836				 */
 837				BUG_ON(!th->t_refcount);
 838				if (th->t_refcount == 1) {
 839					retval =
 840					    reiserfs_end_persistent_transaction
 841					    (th);
 842					th = NULL;
 843					if (retval)
 844						goto failure;
 845				}
 846
 847				retval =
 848				    convert_tail_for_hole(inode, bh_result,
 849							  tail_offset);
 850				if (retval) {
 851					if (retval != -ENOSPC)
 852						reiserfs_error(inode->i_sb,
 853							"clm-6004",
 854							"convert tail failed "
 855							"inode %lu, error %d",
 856							inode->i_ino,
 857							retval);
 858					if (allocated_block_nr) {
 859						/* the bitmap, the super, and the stat data == 3 */
 860						if (!th)
 861							th = reiserfs_persistent_transaction(inode->i_sb, 3);
 862						if (th)
 863							reiserfs_free_block(th,
 864									    inode,
 865									    allocated_block_nr,
 866									    1);
 867					}
 868					goto failure;
 869				}
 870				goto research;
 871			}
 872			retval =
 873			    direct2indirect(th, inode, &path, unbh,
 874					    tail_offset);
 875			if (retval) {
 876				reiserfs_unmap_buffer(unbh);
 877				reiserfs_free_block(th, inode,
 878						    allocated_block_nr, 1);
 879				goto failure;
 880			}
  881			/* it is important that the set_buffer_uptodate is done after
 882			 ** the direct2indirect.  The buffer might contain valid
 883			 ** data newer than the data on disk (read by readpage, changed,
 884			 ** and then sent here by writepage).  direct2indirect needs
 885			 ** to know if unbh was already up to date, so it can decide
 886			 ** if the data in unbh needs to be replaced with data from
 887			 ** the disk
 888			 */
 889			set_buffer_uptodate(unbh);
 890
  891			/* unbh->b_page == NULL in case of a DIRECT_IO request; this means
  892			   the buffer will disappear shortly, so it should not be added to
  893			   the tail list */
 894			if (unbh->b_page) {
 895				/* we've converted the tail, so we must
 896				 ** flush unbh before the transaction commits
 897				 */
 898				reiserfs_add_tail_list(inode, unbh);
 899
 900				/* mark it dirty now to prevent commit_write from adding
 901				 ** this buffer to the inode's dirty buffer list
 902				 */
 903				/*
 904				 * AKPM: changed __mark_buffer_dirty to mark_buffer_dirty().
 905				 * It's still atomic, but it sets the page dirty too,
 906				 * which makes it eligible for writeback at any time by the
 907				 * VM (which was also the case with __mark_buffer_dirty())
 908				 */
 909				mark_buffer_dirty(unbh);
 910			}
 911		} else {
  912			/* append an indirect item with holes if needed; when appending
  913			   the pointer to the 'block'-th block, use the block which is
  914			   already allocated */
 915			struct cpu_key tmp_key;
 916			unp_t unf_single = 0;	// We use this in case we need to allocate only
  917			// one block, which is a fast path
 918			unp_t *un;
 919			__u64 max_to_insert =
 920			    MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
 921			    UNFM_P_SIZE;
 922			__u64 blocks_needed;
 923
 924			RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
 925			       "vs-804: invalid position for append");
 926			/* indirect item has to be appended, set up key of that position */
 927			make_cpu_key(&tmp_key, inode,
 928				     le_key_k_offset(version,
 929						     &(ih->ih_key)) +
 930				     op_bytes_number(ih,
 931						     inode->i_sb->s_blocksize),
 932				     //pos_in_item * inode->i_sb->s_blocksize,
 933				     TYPE_INDIRECT, 3);	// key type is unimportant
 934
 935			RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
 936			       "green-805: invalid offset");
 937			blocks_needed =
 938			    1 +
 939			    ((cpu_key_k_offset(&key) -
 940			      cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
 941			     s_blocksize_bits);
 942
 943			if (blocks_needed == 1) {
 944				un = &unf_single;
 945			} else {
 946				un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
 947				if (!un) {
 948					un = &unf_single;
 949					blocks_needed = 1;
 950					max_to_insert = 0;
 951				}
 952			}
 953			if (blocks_needed <= max_to_insert) {
 954				/* we are going to add target block to the file. Use allocated
 955				   block for that */
 956				un[blocks_needed - 1] =
 957				    cpu_to_le32(allocated_block_nr);
 958				set_block_dev_mapped(bh_result,
 959						     allocated_block_nr, inode);
 960				set_buffer_new(bh_result);
 961				done = 1;
 962			} else {
 963				/* paste hole to the indirect item */
  964				/* If kzalloc failed, max_to_insert becomes zero and it means we
  965				   only have space for one block */
 966				blocks_needed =
 967				    max_to_insert ? max_to_insert : 1;
 968			}
 969			retval =
 970			    reiserfs_paste_into_item(th, &path, &tmp_key, inode,
 971						     (char *)un,
 972						     UNFM_P_SIZE *
 973						     blocks_needed);
 974
 975			if (blocks_needed != 1)
 976				kfree(un);
 977
 978			if (retval) {
 979				reiserfs_free_block(th, inode,
 980						    allocated_block_nr, 1);
 981				goto failure;
 982			}
 983			if (!done) {
  984				/* We need to record the new file size in case this function
  985				   is interrupted/aborted later on. We may do this only for
  986				   holes. */
 987				inode->i_size +=
 988				    inode->i_sb->s_blocksize * blocks_needed;
 989			}
 990		}
 991
 992		if (done == 1)
 993			break;
 994
 995		/* this loop could log more blocks than we had originally asked
 996		 ** for.  So, we have to allow the transaction to end if it is
 997		 ** too big or too full.  Update the inode so things are
 998		 ** consistent if we crash before the function returns
 999		 **
1000		 ** release the path so that anybody waiting on the path before
1001		 ** ending their transaction will be able to continue.
1002		 */
1003		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
1004			retval = restart_transaction(th, inode, &path);
1005			if (retval)
1006				goto failure;
1007		}
1008		/*
1009		 * inserting indirect pointers for a hole can take a
1010		 * long time.  reschedule if needed and also release the write
1011		 * lock for others.
1012		 */
1013		if (need_resched()) {
1014			reiserfs_write_unlock_once(inode->i_sb, lock_depth);
1015			schedule();
1016			lock_depth = reiserfs_write_lock_once(inode->i_sb);
1017		}
1018
1019		retval = search_for_position_by_key(inode->i_sb, &key, &path);
1020		if (retval == IO_ERROR) {
1021			retval = -EIO;
1022			goto failure;
1023		}
1024		if (retval == POSITION_FOUND) {
1025			reiserfs_warning(inode->i_sb, "vs-825",
1026					 "%K should not be found", &key);
1027			retval = -EEXIST;
1028			if (allocated_block_nr)
1029				reiserfs_free_block(th, inode,
1030						    allocated_block_nr, 1);
1031			pathrelse(&path);
1032			goto failure;
1033		}
1034		bh = get_last_bh(&path);
1035		ih = get_ih(&path);
1036		item = get_item(&path);
1037		pos_in_item = path.pos_in_item;
1038	} while (1);
1039
1040	retval = 0;
1041
1042      failure:
1043	if (th && (!dangle || (retval && !th->t_trans_id))) {
1044		int err;
1045		if (th->t_trans_id)
1046			reiserfs_update_sd(th, inode);
1047		err = reiserfs_end_persistent_transaction(th);
1048		if (err)
1049			retval = err;
1050	}
1051
1052	reiserfs_write_unlock_once(inode->i_sb, lock_depth);
1053	reiserfs_check_path(&path);
1054	return retval;
1055}
1056
1057static int
1058reiserfs_readpages(struct file *file, struct address_space *mapping,
1059		   struct list_head *pages, unsigned nr_pages)
1060{
1061	return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
1062}
1063
 1064/* Compute the real number of bytes used by a file.
 1065 * The following three functions can go away when we have enough space in the stat item
 1066 */
1067static int real_space_diff(struct inode *inode, int sd_size)
1068{
1069	int bytes;
1070	loff_t blocksize = inode->i_sb->s_blocksize;
1071
1072	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
1073		return sd_size;
1074
1075	/* End of file is also in full block with indirect reference, so round
1076	 ** up to the next block.
1077	 **
 1078	 ** there is just no way to know if the tail is actually packed
 1079	 ** in the file, so we have to assume it isn't.  When we pack the
 1080	 ** tail, we add 4 bytes to pretend there really is an unformatted
 1081	 ** node pointer
1082	 */
1083	bytes =
1084	    ((inode->i_size +
1085	      (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
1086	    sd_size;
1087	return bytes;
1088}
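/* For example, a 10000-byte regular file on a 4 KiB filesystem rounds up to
 * 3 blocks, so with 4-byte unformatted node pointers (UNFM_P_SIZE) this
 * yields 3 * 4 + sd_size bytes of overhead. */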
1089
1090static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
1091					int sd_size)
1092{
1093	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1094		return inode->i_size +
1095		    (loff_t) (real_space_diff(inode, sd_size));
1096	}
1097	return ((loff_t) real_space_diff(inode, sd_size)) +
1098	    (((loff_t) blocks) << 9);
1099}
1100
1101/* Compute number of blocks used by file in ReiserFS counting */
1102static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
1103{
1104	loff_t bytes = inode_get_bytes(inode);
1105	loff_t real_space = real_space_diff(inode, sd_size);
1106
1107	/* keeps fsck and non-quota versions of reiserfs happy */
1108	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1109		bytes += (loff_t) 511;
1110	}
1111
 1112	/* files from before the quota patch might have i_blocks such that
 1113	 ** bytes < real_space.  Deal with that here to prevent it from
 1114	 ** going negative.
1115	 */
1116	if (bytes < real_space)
1117		return 0;
1118	return (bytes - real_space) >> 9;
1119}
1120
1121//
1122// BAD: new directories have stat data of new type and all other items
 1123// of old type. The version stored in the inode describes the body items,
 1124// so in update_stat_data we cannot rely on the inode, but have to check
 1125// the item version directly
1126//
1127
1128// called by read_locked_inode
1129static void init_inode(struct inode *inode, struct treepath *path)
1130{
1131	struct buffer_head *bh;
1132	struct item_head *ih;
1133	__u32 rdev;
1134	//int version = ITEM_VERSION_1;
1135
1136	bh = PATH_PLAST_BUFFER(path);
1137	ih = PATH_PITEM_HEAD(path);
1138
1139	copy_key(INODE_PKEY(inode), &(ih->ih_key));
1140
1141	INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
1142	REISERFS_I(inode)->i_flags = 0;
1143	REISERFS_I(inode)->i_prealloc_block = 0;
1144	REISERFS_I(inode)->i_prealloc_count = 0;
1145	REISERFS_I(inode)->i_trans_id = 0;
1146	REISERFS_I(inode)->i_jl = NULL;
1147	reiserfs_init_xattr_rwsem(inode);
1148
1149	if (stat_data_v1(ih)) {
1150		struct stat_data_v1 *sd =
1151		    (struct stat_data_v1 *)B_I_PITEM(bh, ih);
1152		unsigned long blocks;
1153
1154		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1155		set_inode_sd_version(inode, STAT_DATA_V1);
1156		inode->i_mode = sd_v1_mode(sd);
1157		inode->i_nlink = sd_v1_nlink(sd);
1158		inode->i_uid = sd_v1_uid(sd);
1159		inode->i_gid = sd_v1_gid(sd);
1160		inode->i_size = sd_v1_size(sd);
1161		inode->i_atime.tv_sec = sd_v1_atime(sd);
1162		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
1163		inode->i_ctime.tv_sec = sd_v1_ctime(sd);
1164		inode->i_atime.tv_nsec = 0;
1165		inode->i_ctime.tv_nsec = 0;
1166		inode->i_mtime.tv_nsec = 0;
1167
1168		inode->i_blocks = sd_v1_blocks(sd);
1169		inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1170		blocks = (inode->i_size + 511) >> 9;
1171		blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
1172		if (inode->i_blocks > blocks) {
 1173			// there was a bug in <= 3.5.23 where i_blocks could take negative
 1174			// values. Starting from 3.5.17 this value could even be stored in
 1175			// the stat data. For such files we set i_blocks based on the file
 1176			// size. Two notes: this can be wrong for sparse files, and the
 1177			// on-disk value is only updated if the file's inode ever changes
1178			inode->i_blocks = blocks;
1179		}
1180
1181		rdev = sd_v1_rdev(sd);
1182		REISERFS_I(inode)->i_first_direct_byte =
1183		    sd_v1_first_direct_byte(sd);
1184		/* an early bug in the quota code can give us an odd number for the
1185		 ** block count.  This is incorrect, fix it here.
1186		 */
1187		if (inode->i_blocks & 1) {
1188			inode->i_blocks++;
1189		}
1190		inode_set_bytes(inode,
1191				to_real_used_space(inode, inode->i_blocks,
1192						   SD_V1_SIZE));
1193		/* nopack is initially zero for v1 objects. For v2 objects,
1194		   nopack is initialised from sd_attrs */
1195		REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
1196	} else {
1197		// new stat data found, but object may have old items
1198		// (directories and symlinks)
1199		struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);
1200
1201		inode->i_mode = sd_v2_mode(sd);
1202		inode->i_nlink = sd_v2_nlink(sd);
1203		inode->i_uid = sd_v2_uid(sd);
1204		inode->i_size = sd_v2_size(sd);
1205		inode->i_gid = sd_v2_gid(sd);
1206		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
1207		inode->i_atime.tv_sec = sd_v2_atime(sd);
1208		inode->i_ctime.tv_sec = sd_v2_ctime(sd);
1209		inode->i_ctime.tv_nsec = 0;
1210		inode->i_mtime.tv_nsec = 0;
1211		inode->i_atime.tv_nsec = 0;
1212		inode->i_blocks = sd_v2_blocks(sd);
1213		rdev = sd_v2_rdev(sd);
1214		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1215			inode->i_generation =
1216			    le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1217		else
1218			inode->i_generation = sd_v2_generation(sd);
1219
1220		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1221			set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1222		else
1223			set_inode_item_key_version(inode, KEY_FORMAT_3_6);
1224		REISERFS_I(inode)->i_first_direct_byte = 0;
1225		set_inode_sd_version(inode, STAT_DATA_V2);
1226		inode_set_bytes(inode,
1227				to_real_used_space(inode, inode->i_blocks,
1228						   SD_V2_SIZE));
1229		/* read persistent inode attributes from sd and initialise
1230		   generic inode flags from them */
1231		REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
1232		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
1233	}
1234
1235	pathrelse(path);
1236	if (S_ISREG(inode->i_mode)) {
1237		inode->i_op = &reiserfs_file_inode_operations;
1238		inode->i_fop = &reiserfs_file_operations;
1239		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1240	} else if (S_ISDIR(inode->i_mode)) {
1241		inode->i_op = &reiserfs_dir_inode_operations;
1242		inode->i_fop = &reiserfs_dir_operations;
1243	} else if (S_ISLNK(inode->i_mode)) {
1244		inode->i_op = &reiserfs_symlink_inode_operations;
1245		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1246	} else {
1247		inode->i_blocks = 0;
1248		inode->i_op = &reiserfs_special_inode_operations;
1249		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
1250	}
1251}
1252
1253// update new stat data with inode fields
1254static void inode2sd(void *sd, struct inode *inode, loff_t size)
1255{
1256	struct stat_data *sd_v2 = (struct stat_data *)sd;
1257	__u16 flags;
1258
1259	set_sd_v2_mode(sd_v2, inode->i_mode);
1260	set_sd_v2_nlink(sd_v2, inode->i_nlink);
1261	set_sd_v2_uid(sd_v2, inode->i_uid);
1262	set_sd_v2_size(sd_v2, size);
1263	set_sd_v2_gid(sd_v2, inode->i_gid);
1264	set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
1265	set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
1266	set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
1267	set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
1268	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1269		set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
1270	else
1271		set_sd_v2_generation(sd_v2, inode->i_generation);
1272	flags = REISERFS_I(inode)->i_attrs;
1273	i_attrs_to_sd_attrs(inode, &flags);
1274	set_sd_v2_attrs(sd_v2, flags);
1275}
1276
1277// used to copy inode's fields to old stat data
1278static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
1279{
1280	struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
1281
1282	set_sd_v1_mode(sd_v1, inode->i_mode);
1283	set_sd_v1_uid(sd_v1, inode->i_uid);
1284	set_sd_v1_gid(sd_v1, inode->i_gid);
1285	set_sd_v1_nlink(sd_v1, inode->i_nlink);
1286	set_sd_v1_size(sd_v1, size);
1287	set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
1288	set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
1289	set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
1290
1291	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1292		set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
1293	else
1294		set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
1295
1296	// Sigh. i_first_direct_byte is back
1297	set_sd_v1_first_direct_byte(sd_v1,
1298				    REISERFS_I(inode)->i_first_direct_byte);
1299}
1300
1301/* NOTE, you must prepare the buffer head before sending it here,
1302** and then log it after the call
1303*/
1304static void update_stat_data(struct treepath *path, struct inode *inode,
1305			     loff_t size)
1306{
1307	struct buffer_head *bh;
1308	struct item_head *ih;
1309
1310	bh = PATH_PLAST_BUFFER(path);
1311	ih = PATH_PITEM_HEAD(path);
1312
1313	if (!is_statdata_le_ih(ih))
1314		reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
1315			       INODE_PKEY(inode), ih);
1316
1317	if (stat_data_v1(ih)) {
1318		// path points to old stat data
1319		inode2sd_v1(B_I_PITEM(bh, ih), inode, size);
1320	} else {
1321		inode2sd(B_I_PITEM(bh, ih), inode, size);
1322	}
1323
1324	return;
1325}
1326
1327void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
1328			     struct inode *inode, loff_t size)
1329{
1330	struct cpu_key key;
1331	INITIALIZE_PATH(path);
1332	struct buffer_head *bh;
1333	int fs_gen;
1334	struct item_head *ih, tmp_ih;
1335	int retval;
1336
1337	BUG_ON(!th->t_trans_id);
1338
1339	make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);	//key type is unimportant
1340
1341	for (;;) {
1342		int pos;
1343		/* look for the object's stat data */
1344		retval = search_item(inode->i_sb, &key, &path);
1345		if (retval == IO_ERROR) {
1346			reiserfs_error(inode->i_sb, "vs-13050",
1347				       "i/o failure occurred trying to "
1348				       "update %K stat data", &key);
1349			return;
1350		}
1351		if (retval == ITEM_NOT_FOUND) {
1352			pos = PATH_LAST_POSITION(&path);
1353			pathrelse(&path);
1354			if (inode->i_nlink == 0) {
1355				/*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
1356				return;
1357			}
1358			reiserfs_warning(inode->i_sb, "vs-13060",
1359					 "stat data of object %k (nlink == %d) "
1360					 "not found (pos %d)",
1361					 INODE_PKEY(inode), inode->i_nlink,
1362					 pos);
1363			reiserfs_check_path(&path);
1364			return;
1365		}
1366
1367		/* sigh, prepare_for_journal might schedule.  When it schedules the
1368		 ** FS might change.  We have to detect that, and loop back to the
1369		 ** search if the stat data item has moved
1370		 */
1371		bh = get_last_bh(&path);
1372		ih = get_ih(&path);
1373		copy_item_head(&tmp_ih, ih);
1374		fs_gen = get_generation(inode->i_sb);
1375		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
1376		if (fs_changed(fs_gen, inode->i_sb)
1377		    && item_moved(&tmp_ih, &path)) {
1378			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
1379			continue;	/* Stat_data item has been moved after scheduling. */
1380		}
1381		break;
1382	}
1383	update_stat_data(&path, inode, size);
1384	journal_mark_dirty(th, th->t_super, bh);
1385	pathrelse(&path);
1386	return;
1387}
1388
1389/* reiserfs_read_locked_inode is called to read the inode off disk, and it
1390** does a make_bad_inode when things go wrong.  But, we need to make sure
1391** and clear the key in the private portion of the inode, otherwise a
1392** corresponding iput might try to delete whatever object the inode last
1393** represented.
1394*/
1395static void reiserfs_make_bad_inode(struct inode *inode)
1396{
1397	memset(INODE_PKEY(inode), 0, KEY_SIZE);
1398	make_bad_inode(inode);
1399}
1400
1401//
1402// initially this function was derived from minix or ext2's analog and
1403// evolved as the prototype did
1404//
1405
1406int reiserfs_init_locked_inode(struct inode *inode, void *p)
1407{
1408	struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
1409	inode->i_ino = args->objectid;
1410	INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
1411	return 0;
1412}
1413
 1414/* looks for the stat data in the tree, and fills in the fields of the
 1415   in-core inode from it */
1416void reiserfs_read_locked_inode(struct inode *inode,
1417				struct reiserfs_iget_args *args)
1418{
1419	INITIALIZE_PATH(path_to_sd);
1420	struct cpu_key key;
1421	unsigned long dirino;
1422	int retval;
1423
1424	dirino = args->dirid;
1425
1426	/* set version 1, version 2 could be used too, because stat data
1427	   key is the same in both versions */
1428	key.version = KEY_FORMAT_3_5;
1429	key.on_disk_key.k_dir_id = dirino;
1430	key.on_disk_key.k_objectid = inode->i_ino;
1431	key.on_disk_key.k_offset = 0;
1432	key.on_disk_key.k_type = 0;
1433
1434	/* look for the object's stat data */
1435	retval = search_item(inode->i_sb, &key, &path_to_sd);
1436	if (retval == IO_ERROR) {
1437		reiserfs_error(inode->i_sb, "vs-13070",
1438			       "i/o failure occurred trying to find "
1439			       "stat data of %K", &key);
1440		reiserfs_make_bad_inode(inode);
1441		return;
1442	}
1443	if (retval != ITEM_FOUND) {
1444		/* a stale NFS handle can trigger this without it being an error */
1445		pathrelse(&path_to_sd);
1446		reiserfs_make_bad_inode(inode);
1447		inode->i_nlink = 0;
1448		return;
1449	}
1450
1451	init_inode(inode, &path_to_sd);
1452
1453	/* It is possible that knfsd is trying to access inode of a file
1454	   that is being removed from the disk by some other thread. As we
1455	   update sd on unlink all that is required is to check for nlink
1456	   here. This bug was first found by Sizif when debugging
1457	   SquidNG/Butterfly, forgotten, and found again after Philippe
1458	   Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
1459
 1460	   A more logical fix would require changes in fs/inode.c:iput() to
1461	   remove inode from hash-table _after_ fs cleaned disk stuff up and
1462	   in iget() to return NULL if I_FREEING inode is found in
1463	   hash-table. */
1464	/* Currently there is one place where it's ok to meet inode with
1465	   nlink==0: processing of open-unlinked and half-truncated files
1466	   during mount (fs/reiserfs/super.c:finish_unfinished()). */
1467	if ((inode->i_nlink == 0) &&
1468	    !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
1469		reiserfs_warning(inode->i_sb, "vs-13075",
1470				 "dead inode read from disk %K. "
 1471				 "This is likely to be a race with knfsd. Ignore",
1472				 &key);
1473		reiserfs_make_bad_inode(inode);
1474	}
1475
 1476	reiserfs_check_path(&path_to_sd);	/* init_inode should have released the path */
1477
1478	/*
1479	 * Stat data v1 doesn't support ACLs.
1480	 */
1481	if (get_inode_sd_version(inode) == STAT_DATA_V1)
1482		cache_no_acl(inode);
1483}
1484
1485/**
1486 * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
1487 *
1488 * @inode:    inode from hash table to check
1489 * @opaque:   "cookie" passed to iget5_locked(). This is &reiserfs_iget_args.
1490 *
1491 * This function is called by iget5_locked() to distinguish reiserfs inodes
1492 * having the same inode numbers. Such inodes can only exist due to some
1493 * error condition. One of them should be bad. Inodes with identical
1494 * inode numbers (objectids) are distinguished by parent directory ids.
1495 *
1496 */
1497int reiserfs_find_actor(struct inode *inode, void *opaque)
1498{
1499	struct reiserfs_iget_args *args;
1500
1501	args = opaque;
1502	/* args is already in CPU order */
1503	return (inode->i_ino == args->objectid) &&
1504	    (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
1505}
1506
1507struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1508{
1509	struct inode *inode;
1510	struct reiserfs_iget_args args;
1511
1512	args.objectid = key->on_disk_key.k_objectid;
1513	args.dirid = key->on_disk_key.k_dir_id;
1514	reiserfs_write_unlock(s);
1515	inode = iget5_locked(s, key->on_disk_key.k_objectid,
1516			     reiserfs_find_actor, reiserfs_init_locked_inode,
1517			     (void *)(&args));
1518	reiserfs_write_lock(s);
1519	if (!inode)
1520		return ERR_PTR(-ENOMEM);
1521
1522	if (inode->i_state & I_NEW) {
1523		reiserfs_read_locked_inode(inode, &args);
1524		unlock_new_inode(inode);
1525	}
1526
1527	if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
1528		/* either due to i/o error or a stale NFS handle */
1529		iput(inode);
1530		inode = NULL;
1531	}
1532	return inode;
1533}
1534
1535static struct dentry *reiserfs_get_dentry(struct super_block *sb,
1536	u32 objectid, u32 dir_id, u32 generation)
1537
1538{
1539	struct cpu_key key;
1540	struct inode *inode;
1541
1542	key.on_disk_key.k_objectid = objectid;
1543	key.on_disk_key.k_dir_id = dir_id;
1544	reiserfs_write_lock(sb);
1545	inode = reiserfs_iget(sb, &key);
1546	if (inode && !IS_ERR(inode) && generation != 0 &&
1547	    generation != inode->i_generation) {
1548		iput(inode);
1549		inode = NULL;
1550	}
1551	reiserfs_write_unlock(sb);
1552
1553	return d_obtain_alias(inode);
1554}
1555
1556struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1557		int fh_len, int fh_type)
1558{
 1559	/* fhtype happens to reflect the number of u32s encoded.
 1560	 * due to a bug in earlier code, fhtype might indicate there
 1561	 * are more u32s than actually fit.
1562	 * so if fhtype seems to be more than len, reduce fhtype.
1563	 * Valid types are:
1564	 *   2 - objectid + dir_id - legacy support
1565	 *   3 - objectid + dir_id + generation
1566	 *   4 - objectid + dir_id + objectid and dirid of parent - legacy
1567	 *   5 - objectid + dir_id + generation + objectid and dirid of parent
1568	 *   6 - as above plus generation of directory
1569	 * 6 does not fit in NFSv2 handles
1570	 */
1571	if (fh_type > fh_len) {
1572		if (fh_type != 6 || fh_len != 5)
1573			reiserfs_warning(sb, "reiserfs-13077",
1574				"nfsd/reiserfs, fhtype=%d, len=%d - odd",
1575				fh_type, fh_len);
1576		fh_type = 5;
1577	}
1578
1579	return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
1580		(fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
1581}
1582
1583struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1584		int fh_len, int fh_type)
1585{
1586	if (fh_type < 4)
1587		return NULL;
1588
1589	return reiserfs_get_dentry(sb,
1590		(fh_type >= 5) ? fid->raw[3] : fid->raw[2],
1591		(fh_type >= 5) ? fid->raw[4] : fid->raw[3],
1592		(fh_type == 6) ? fid->raw[5] : 0);
1593}
1594
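/* Encode an NFS file handle for this dentry: data[0..2] hold the objectid,
 * dir_id and generation (type 3); data[3..4] add the parent's objectid and
 * dir_id (type 5); data[5] adds the parent's generation (type 6). */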
1595int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
1596		       int need_parent)
1597{
1598	struct inode *inode = dentry->d_inode;
1599	int maxlen = *lenp;
1600
1601	if (need_parent && (maxlen < 5)) {
1602		*lenp = 5;
1603		return 255;
1604	} else if (maxlen < 3) {
1605		*lenp = 3;
1606		return 255;
1607	}
1608
1609	data[0] = inode->i_ino;
1610	data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1611	data[2] = inode->i_generation;
1612	*lenp = 3;
1613	/* no room for directory info? return what we've stored so far */
1614	if (maxlen < 5 || !need_parent)
1615		return 3;
1616
1617	spin_lock(&dentry->d_lock);
1618	inode = dentry->d_parent->d_inode;
1619	data[3] = inode->i_ino;
1620	data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1621	*lenp = 5;
1622	if (maxlen >= 6) {
1623		data[5] = inode->i_generation;
1624		*lenp = 6;
1625	}
1626	spin_unlock(&dentry->d_lock);
1627	return *lenp;
1628}
1629
1630/* looks for stat data, then copies fields to it, marks the buffer
1631   containing stat data as dirty */
1632/* reiserfs inodes are never really dirty, since the dirty inode call
1633** always logs them.  This call allows the VFS inode marking routines
1634** to properly mark inodes for datasync and such, but only actually
1635** does something when called for a synchronous update.
1636*/
1637int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1638{
1639	struct reiserfs_transaction_handle th;
1640	int jbegin_count = 1;
1641
1642	if (inode->i_sb->s_flags & MS_RDONLY)
1643		return -EROFS;
1644	/* memory pressure can sometimes initiate write_inode calls with sync == 1;
1645	 ** these cases occur when the system needs RAM, not when the
1646	 ** inode needs to reach disk for safety, and they can safely be
1647	 ** ignored because the altered inode has already been logged.
1648	 */
1649	if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
1650		reiserfs_write_lock(inode->i_sb);
1651		if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
1652			reiserfs_update_sd(&th, inode);
1653			journal_end_sync(&th, inode->i_sb, jbegin_count);
1654		}
1655		reiserfs_write_unlock(inode->i_sb);
1656	}
1657	return 0;
1658}
1659
1660/* stat data of the new object is already inserted; this inserts the item
1661   containing the "." and ".." entries */
1662static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
1663				  struct inode *inode,
1664				  struct item_head *ih, struct treepath *path,
1665				  struct inode *dir)
1666{
1667	struct super_block *sb = th->t_super;
1668	char empty_dir[EMPTY_DIR_SIZE];
1669	char *body = empty_dir;
1670	struct cpu_key key;
1671	int retval;
1672
1673	BUG_ON(!th->t_trans_id);
1674
1675	_make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id),
1676		      le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
1677		      TYPE_DIRENTRY, 3 /*key length */ );
1678
1679	/* compose item head for new item. Directories consist of items of
1680	   the old type (ITEM_VERSION_1). Do not set the key (second arg is 0);
1681	   that is done by reiserfs_new_inode */
1682	if (old_format_only(sb)) {
1683		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1684				  TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
1685
1686		make_empty_dir_item_v1(body, ih->ih_key.k_dir_id,
1687				       ih->ih_key.k_objectid,
1688				       INODE_PKEY(dir)->k_dir_id,
1689				       INODE_PKEY(dir)->k_objectid);
1690	} else {
1691		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1692				  TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
1693
1694		make_empty_dir_item(body, ih->ih_key.k_dir_id,
1695				    ih->ih_key.k_objectid,
1696				    INODE_PKEY(dir)->k_dir_id,
1697				    INODE_PKEY(dir)->k_objectid);
1698	}
1699
1700	/* look for place in the tree for new item */
1701	retval = search_item(sb, &key, path);
1702	if (retval == IO_ERROR) {
1703		reiserfs_error(sb, "vs-13080",
1704			       "i/o failure occurred creating new directory");
1705		return -EIO;
1706	}
1707	if (retval == ITEM_FOUND) {
1708		pathrelse(path);
1709		reiserfs_warning(sb, "vs-13070",
1710				 "object with this key exists (%k)",
1711				 &(ih->ih_key));
1712		return -EEXIST;
1713	}
1714
1715	/* insert the item, that is, the empty directory item */
1716	return reiserfs_insert_item(th, path, &key, ih, inode, body);
1717}
1718
1719/* stat data of object has been inserted, this inserts the item
1720   containing the body of symlink */
1721static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct inode *inode,	/* Inode of symlink */
1722				struct item_head *ih,
1723				struct treepath *path, const char *symname,
1724				int item_len)
1725{
1726	struct super_block *sb = th->t_super;
1727	struct cpu_key key;
1728	int retval;
1729
1730	BUG_ON(!th->t_trans_id);
1731
1732	_make_cpu_key(&key, KEY_FORMAT_3_5,
1733		      le32_to_cpu(ih->ih_key.k_dir_id),
1734		      le32_to_cpu(ih->ih_key.k_objectid),
1735		      1, TYPE_DIRECT, 3 /*key length */ );
1736
1737	make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len,
1738			  0 /*free_space */ );
1739
1740	/* look for place in the tree for new item */
1741	retval = search_item(sb, &key, path);
1742	if (retval == IO_ERROR) {
1743		reiserfs_error(sb, "vs-13080",
1744			       "i/o failure occurred creating new symlink");
1745		return -EIO;
1746	}
1747	if (retval == ITEM_FOUND) {
1748		pathrelse(path);
1749		reiserfs_warning(sb, "vs-13080",
1750				 "object with this key exists (%k)",
1751				 &(ih->ih_key));
1752		return -EEXIST;
1753	}
1754
1755	/* insert the item, that is, the body of the symlink */
1756	return reiserfs_insert_item(th, path, &key, ih, inode, symname);
1757}
1758
1759/* inserts the stat data into the tree, and then calls
1760   reiserfs_new_directory (to insert ".", ".." item if new object is
1761   directory) or reiserfs_new_symlink (to insert symlink body if new
1762   object is symlink) or nothing (if new object is regular file)
1763
1764   NOTE! uid and gid must already be set in the inode.  If we return
1765   non-zero due to an error, we have to drop the quota previously allocated
1766   for the fresh inode.  This can only be done outside a transaction, so
1767   if we return non-zero, we also end the transaction.  */
1768int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1769		       struct inode *dir, int mode, const char *symname,
1770		       /* 0 for regular, EMPTY_DIR_SIZE for dirs,
1771		          strlen(symname) for symlinks */
1772		       loff_t i_size, struct dentry *dentry,
1773		       struct inode *inode,
1774		       struct reiserfs_security_handle *security)
1775{
1776	struct super_block *sb;
1777	struct reiserfs_iget_args args;
1778	INITIALIZE_PATH(path_to_key);
1779	struct cpu_key key;
1780	struct item_head ih;
1781	struct stat_data sd;
1782	int retval;
1783	int err;
1784
1785	BUG_ON(!th->t_trans_id);
1786
1787	dquot_initialize(inode);
1788	err = dquot_alloc_inode(inode);
1789	if (err)
1790		goto out_end_trans;
1791	if (!dir->i_nlink) {
1792		err = -EPERM;
1793		goto out_bad_inode;
1794	}
1795
1796	sb = dir->i_sb;
1797
1798	/* item head of new item */
1799	ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
1800	ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
1801	if (!ih.ih_key.k_objectid) {
1802		err = -ENOMEM;
1803		goto out_bad_inode;
1804	}
1805	args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
1806	if (old_format_only(sb))
1807		make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
1808				  TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
1809	else
1810		make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
1811				  TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
1812	memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
1813	args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
1814	if (insert_inode_locked4(inode, args.objectid,
1815			     reiserfs_find_actor, &args) < 0) {
1816		err = -EINVAL;
1817		goto out_bad_inode;
1818	}
1819	if (old_format_only(sb))
1820		/* not a perfect generation count, as object ids can be reused, but
1821		 ** this is as good as reiserfs can do right now.
1822		 ** note that the private part of the inode isn't filled in yet; we have
1823		 ** to use the directory.
1824		 */
1825		inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
1826	else
1827#if defined( USE_INODE_GENERATION_COUNTER )
1828		inode->i_generation =
1829		    le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
1830#else
1831		inode->i_generation = ++event;
1832#endif
1833
1834	/* fill stat data */
1835	inode->i_nlink = (S_ISDIR(mode) ? 2 : 1);
1836
1837	/* uid and gid must already be set by the caller for quota init */
1838
1839	/* symlink cannot be immutable or append only, right? */
1840	if (S_ISLNK(inode->i_mode))
1841		inode->i_flags &= ~(S_IMMUTABLE | S_APPEND);
1842
1843	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
1844	inode->i_size = i_size;
1845	inode->i_blocks = 0;
1846	inode->i_bytes = 0;
1847	REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
1848	    U32_MAX /*NO_BYTES_IN_DIRECT_ITEM */ ;
1849
1850	INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
1851	REISERFS_I(inode)->i_flags = 0;
1852	REISERFS_I(inode)->i_prealloc_block = 0;
1853	REISERFS_I(inode)->i_prealloc_count = 0;
1854	REISERFS_I(inode)->i_trans_id = 0;
1855	REISERFS_I(inode)->i_jl = NULL;
1856	REISERFS_I(inode)->i_attrs =
1857	    REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
1858	sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
1859	reiserfs_init_xattr_rwsem(inode);
1860
1861	/* key to search for correct place for new stat data */
1862	_make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id),
1863		      le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET,
1864		      TYPE_STAT_DATA, 3 /*key length */ );
1865
1866	/* find proper place for inserting of stat data */
1867	retval = search_item(sb, &key, &path_to_key);
1868	if (retval == IO_ERROR) {
1869		err = -EIO;
1870		goto out_bad_inode;
1871	}
1872	if (retval == ITEM_FOUND) {
1873		pathrelse(&path_to_key);
1874		err = -EEXIST;
1875		goto out_bad_inode;
1876	}
1877	if (old_format_only(sb)) {
1878		if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
1879			pathrelse(&path_to_key);
1880			/* i_uid or i_gid is too big to be stored in stat data v3.5 */
1881			err = -EINVAL;
1882			goto out_bad_inode;
1883		}
1884		inode2sd_v1(&sd, inode, inode->i_size);
1885	} else {
1886		inode2sd(&sd, inode, inode->i_size);
1887	}
1888	// store in the in-core inode the key of the stat data and the version
1889	// that all object items will have (directory items will have the old
1890	// offset format, other new objects will consist of new items)
1891	if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
1892		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1893	else
1894		set_inode_item_key_version(inode, KEY_FORMAT_3_6);
1895	if (old_format_only(sb))
1896		set_inode_sd_version(inode, STAT_DATA_V1);
1897	else
1898		set_inode_sd_version(inode, STAT_DATA_V2);
1899
1900	/* insert the stat data into the tree */
1901#ifdef DISPLACE_NEW_PACKING_LOCALITIES
1902	if (REISERFS_I(dir)->new_packing_locality)
1903		th->displace_new_blocks = 1;
1904#endif
1905	retval =
1906	    reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
1907				 (char *)(&sd));
1908	if (retval) {
1909		err = retval;
1910		reiserfs_check_path(&path_to_key);
1911		goto out_bad_inode;
1912	}
1913#ifdef DISPLACE_NEW_PACKING_LOCALITIES
1914	if (!th->displace_new_blocks)
1915		REISERFS_I(dir)->new_packing_locality = 0;
1916#endif
1917	if (S_ISDIR(mode)) {
1918		/* insert item with "." and ".." */
1919		retval =
1920		    reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
1921	}
1922
1923	if (S_ISLNK(mode)) {
1924		/* insert body of symlink */
1925		if (!old_format_only(sb))
1926			i_size = ROUND_UP(i_size);
1927		retval =
1928		    reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
1929					 i_size);
1930	}
1931	if (retval) {
1932		err = retval;
1933		reiserfs_check_path(&path_to_key);
1934		journal_end(th, th->t_super, th->t_blocks_allocated);
1935		goto out_inserted_sd;
1936	}
1937
1938	if (reiserfs_posixacl(inode->i_sb)) {
1939		retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
1940		if (retval) {
1941			err = retval;
1942			reiserfs_check_path(&path_to_key);
1943			journal_end(th, th->t_super, th->t_blocks_allocated);
1944			goto out_inserted_sd;
1945		}
1946	} else if (inode->i_sb->s_flags & MS_POSIXACL) {
1947		reiserfs_warning(inode->i_sb, "jdm-13090",
1948				 "ACLs aren't enabled in the fs, "
1949				 "but vfs thinks they are!");
1950	} else if (IS_PRIVATE(dir))
1951		inode->i_flags |= S_PRIVATE;
1952
1953	if (security->name) {
1954		retval = reiserfs_security_write(th, inode, security);
1955		if (retval) {
1956			err = retval;
1957			reiserfs_check_path(&path_to_key);
1958			retval = journal_end(th, th->t_super,
1959					     th->t_blocks_allocated);
1960			if (retval)
1961				err = retval;
1962			goto out_inserted_sd;
1963		}
1964	}
1965
1966	reiserfs_update_sd(th, inode);
1967	reiserfs_check_path(&path_to_key);
1968
1969	return 0;
1970
1971/* it looks like you can easily compress these two goto targets into
1972 * one.  Keeping it like this doesn't actually hurt anything, and they
1973 * are placeholders for what the quota code actually needs.
1974 */
1975      out_bad_inode:
1976	/* Invalidate the object, nothing was inserted yet */
1977	INODE_PKEY(inode)->k_objectid = 0;
1978
1979	/* Quota change must be inside a transaction for journaling */
1980	dquot_free_inode(inode);
1981
1982      out_end_trans:
1983	journal_end(th, th->t_super, th->t_blocks_allocated);
1984	/* Drop can be outside a transaction, and it needs more credits, so it's better to have it outside */
1985	dquot_drop(inode);
1986	inode->i_flags |= S_NOQUOTA;
1987	make_bad_inode(inode);
1988
1989      out_inserted_sd:
1990	inode->i_nlink = 0;
1991	th->t_trans_id = 0;	/* so the caller can't use this handle later */
1992	unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
1993	iput(inode);
1994	return err;
1995}
1996
1997/*
1998** finds the tail page in the page cache,
1999** reads the last block in.
2000**
2001** On success, page_result is set to a locked, pinned page, and bh_result
2002** is set to an up-to-date buffer for the last block in the file.  returns 0.
2003**
2004** tail conversion is not done, so bh_result might not be valid for writing;
2005** check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
2006** trying to write the block.
2007**
2008** on failure, nonzero is returned; page_result and bh_result are untouched.
2009*/
2010static int grab_tail_page(struct inode *inode,
2011			  struct page **page_result,
2012			  struct buffer_head **bh_result)
2013{
2014
2015	/* we want the page with the last byte in the file,
2016	 ** not the page that will hold the next byte for appending
2017	 */
2018	unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
2019	unsigned long pos = 0;
2020	unsigned long start = 0;
2021	unsigned long blocksize = inode->i_sb->s_blocksize;
2022	unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1);
2023	struct buffer_head *bh;
2024	struct buffer_head *head;
2025	struct page *page;
2026	int error;
2027
2028	/* we know that we are only called with inode->i_size > 0.
2029	 ** we also know that a file tail can never be as big as a block.
2030	 ** If i_size % blocksize == 0, our file is currently block aligned
2031	 ** and it won't need converting or zeroing after a truncate.
2032	 */
2033	if ((offset & (blocksize - 1)) == 0) {
2034		return -ENOENT;
2035	}
2036	page = grab_cache_page(inode->i_mapping, index);
2037	error = -ENOMEM;
2038	if (!page) {
2039		goto out;
2040	}
2041	/* start within the page of the last block in the file */
2042	start = (offset / blocksize) * blocksize;
2043
2044	error = __block_write_begin(page, start, offset - start,
2045				    reiserfs_get_block_create_0);
2046	if (error)
2047		goto unlock;
2048
2049	head = page_buffers(page);
2050	bh = head;
2051	do {
2052		if (pos >= start) {
2053			break;
2054		}
2055		bh = bh->b_this_page;
2056		pos += blocksize;
2057	} while (bh != head);
2058
2059	if (!buffer_uptodate(bh)) {
2060		/* note, this should never happen; prepare_write should
2061		 ** be taking care of this for us.  If the buffer isn't up to date,
2062		 ** I've screwed up the code to find the buffer, or the code to
2063		 ** call prepare_write.
2064		 */
2065		reiserfs_error(inode->i_sb, "clm-6000",
2066			       "error reading block %lu", bh->b_blocknr);
2067		error = -EIO;
2068		goto unlock;
2069	}
2070	*bh_result = bh;
2071	*page_result = page;
2072
2073      out:
2074	return error;
2075
2076      unlock:
2077	unlock_page(page);
2078	page_cache_release(page);
2079	return error;
2080}
2081
2082/*
2083** vfs version of truncate file.  Must NOT be called with
2084** a transaction already started.
2085**
2086** some code taken from block_truncate_page
2087*/
2088int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2089{
2090	struct reiserfs_transaction_handle th;
2091	/* we want the offset for the first byte after the end of the file */
2092	unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
2093	unsigned blocksize = inode->i_sb->s_blocksize;
2094	unsigned length;
2095	struct page *page = NULL;
2096	int error;
2097	struct buffer_head *bh = NULL;
2098	int err2;
2099	int lock_depth;
2100
2101	lock_depth = reiserfs_write_lock_once(inode->i_sb);
2102
2103	if (inode->i_size > 0) {
2104		error = grab_tail_page(inode, &page, &bh);
2105		if (error) {
2106			// -ENOENT means we truncated past the end of the file,
2107			// and get_block_create_0 could not find a block to read in,
2108			// which is ok.
2109			if (error != -ENOENT)
2110				reiserfs_error(inode->i_sb, "clm-6001",
2111					       "grab_tail_page failed %d",
2112					       error);
2113			page = NULL;
2114			bh = NULL;
2115		}
2116	}
2117
2118	/* so, if page != NULL, we have a buffer head for the offset at
2119	 ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
2120	 ** then we have an unformatted node.  Otherwise, we have a direct item,
2121	 ** and no zeroing is required on disk.  We zero after the truncate,
2122	 ** because the truncate might pack the item anyway
2123	 ** (it will unmap bh if it packs).
2124	 */
2125	/* it is enough to reserve space in the transaction for 2 balancings:
2126	   one for "save" link adding and another for the first
2127	   cut_from_item. 1 is for update_sd */
2128	error = journal_begin(&th, inode->i_sb,
2129			      JOURNAL_PER_BALANCE_CNT * 2 + 1);
2130	if (error)
2131		goto out;
2132	reiserfs_update_inode_transaction(inode);
2133	if (update_timestamps)
2134		/* we are doing real truncate: if the system crashes before the last
2135		   transaction of truncating gets committed - on reboot the file
2136		   either appears truncated properly or not truncated at all */
2137		add_save_link(&th, inode, 1);
2138	err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
2139	error =
2140	    journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
2141	if (error)
2142		goto out;
2143
2144	/* check reiserfs_do_truncate after ending the transaction */
2145	if (err2) {
2146		error = err2;
2147		goto out;
2148	}
2149
2150	if (update_timestamps) {
2151		error = remove_save_link(inode, 1 /* truncate */);
2152		if (error)
2153			goto out;
2154	}
2155
2156	if (page) {
2157		length = offset & (blocksize - 1);
2158		/* if we are not on a block boundary */
2159		if (length) {
2160			length = blocksize - length;
2161			zero_user(page, offset, length);
2162			if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2163				mark_buffer_dirty(bh);
2164			}
2165		}
2166		unlock_page(page);
2167		page_cache_release(page);
2168	}
2169
2170	reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2171
2172	return 0;
2173      out:
2174	if (page) {
2175		unlock_page(page);
2176		page_cache_release(page);
2177	}
2178
2179	reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2180
2181	return error;
2182}
2183
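/*
 * Map a logical block of a page being written back.  An unformatted
 * node simply gets bh_result mapped to its block number.  If the data
 * still lives in a direct item (a file tail), the page contents are
 * copied back into the item inside a transaction and bh_result is left
 * mapped to block 0, signalling that no block write is needed.  Holes
 * fall through to reiserfs_get_block with GET_BLOCK_CREATE.
 */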
2184static int map_block_for_writepage(struct inode *inode,
2185				   struct buffer_head *bh_result,
2186				   unsigned long block)
2187{
2188	struct reiserfs_transaction_handle th;
2189	int fs_gen;
2190	struct item_head tmp_ih;
2191	struct item_head *ih;
2192	struct buffer_head *bh;
2193	__le32 *item;
2194	struct cpu_key key;
2195	INITIALIZE_PATH(path);
2196	int pos_in_item;
2197	int jbegin_count = JOURNAL_PER_BALANCE_CNT;
2198	loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
2199	int retval;
2200	int use_get_block = 0;
2201	int bytes_copied = 0;
2202	int copy_size;
2203	int trans_running = 0;
2204
2205	/* catch places below that try to log something without starting a trans */
2206	th.t_trans_id = 0;
2207
2208	if (!buffer_uptodate(bh_result)) {
2209		return -EIO;
2210	}
2211
2212	kmap(bh_result->b_page);
2213      start_over:
2214	reiserfs_write_lock(inode->i_sb);
2215	make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
2216
2217      research:
2218	retval = search_for_position_by_key(inode->i_sb, &key, &path);
2219	if (retval != POSITION_FOUND) {
2220		use_get_block = 1;
2221		goto out;
2222	}
2223
2224	bh = get_last_bh(&path);
2225	ih = get_ih(&path);
2226	item = get_item(&path);
2227	pos_in_item = path.pos_in_item;
2228
2229	/* we've found an unformatted node */
2230	if (indirect_item_found(retval, ih)) {
2231		if (bytes_copied > 0) {
2232			reiserfs_warning(inode->i_sb, "clm-6002",
2233					 "bytes_copied %d", bytes_copied);
2234		}
2235		if (!get_block_num(item, pos_in_item)) {
2236			/* crap, we are writing to a hole */
2237			use_get_block = 1;
2238			goto out;
2239		}
2240		set_block_dev_mapped(bh_result,
2241				     get_block_num(item, pos_in_item), inode);
2242	} else if (is_direct_le_ih(ih)) {
2243		char *p;
2244		p = page_address(bh_result->b_page);
2245		p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1);
2246		copy_size = ih_item_len(ih) - pos_in_item;
2247
2248		fs_gen = get_generation(inode->i_sb);
2249		copy_item_head(&tmp_ih, ih);
2250
2251		if (!trans_running) {
2252			/* vs-3050 is gone, no need to drop the path */
2253			retval = journal_begin(&th, inode->i_sb, jbegin_count);
2254			if (retval)
2255				goto out;
2256			reiserfs_update_inode_transaction(inode);
2257			trans_running = 1;
2258			if (fs_changed(fs_gen, inode->i_sb)
2259			    && item_moved(&tmp_ih, &path)) {
2260				reiserfs_restore_prepared_buffer(inode->i_sb,
2261								 bh);
2262				goto research;
2263			}
2264		}
2265
2266		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
2267
2268		if (fs_changed(fs_gen, inode->i_sb)
2269		    && item_moved(&tmp_ih, &path)) {
2270			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
2271			goto research;
2272		}
2273
2274		memcpy(B_I_PITEM(bh, ih) + pos_in_item, p + bytes_copied,
2275		       copy_size);
2276
2277		journal_mark_dirty(&th, inode->i_sb, bh);
2278		bytes_copied += copy_size;
2279		set_block_dev_mapped(bh_result, 0, inode);
2280
2281		/* are there still bytes left? */
2282		if (bytes_copied < bh_result->b_size &&
2283		    (byte_offset + bytes_copied) < inode->i_size) {
2284			set_cpu_key_k_offset(&key,
2285					     cpu_key_k_offset(&key) +
2286					     copy_size);
2287			goto research;
2288		}
2289	} else {
2290		reiserfs_warning(inode->i_sb, "clm-6003",
2291				 "bad item inode %lu", inode->i_ino);
2292		retval = -EIO;
2293		goto out;
2294	}
2295	retval = 0;
2296
2297      out:
2298	pathrelse(&path);
2299	if (trans_running) {
2300		int err = journal_end(&th, inode->i_sb, jbegin_count);
2301		if (err)
2302			retval = err;
2303		trans_running = 0;
2304	}
2305	reiserfs_write_unlock(inode->i_sb);
2306
2307	/* this is where we fill in holes in the file. */
2308	if (use_get_block) {
2309		retval = reiserfs_get_block(inode, block, bh_result,
2310					    GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
2311					    | GET_BLOCK_NO_DANGLE);
2312		if (!retval) {
2313			if (!buffer_mapped(bh_result)
2314			    || bh_result->b_blocknr == 0) {
2315				/* get_block failed to find a mapped unformatted node. */
2316				use_get_block = 0;
2317				goto start_over;
2318			}
2319		}
2320	}
2321	kunmap(bh_result->b_page);
2322
2323	if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
2324		/* we've copied data from the page into the direct item, so the
2325		 * buffer in the page is now clean, mark it to reflect that.
2326		 */
2327		lock_buffer(bh_result);
2328		clear_buffer_dirty(bh_result);
2329		unlock_buffer(bh_result);
2330	}
2331	return retval;
2332}
2333
2334/*
2335 * mason@suse.com: updated in 2.5.54 to follow the same general io
2336 * start/recovery path as __block_write_full_page, along with special
2337 * code to handle reiserfs tails.
2338 */
2339static int reiserfs_write_full_page(struct page *page,
2340				    struct writeback_control *wbc)
2341{
2342	struct inode *inode = page->mapping->host;
2343	unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
2344	int error = 0;
2345	unsigned long block;
2346	sector_t last_block;
2347	struct buffer_head *head, *bh;
2348	int partial = 0;
2349	int nr = 0;
2350	int checked = PageChecked(page);
2351	struct reiserfs_transaction_handle th;
2352	struct super_block *s = inode->i_sb;
2353	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
2354	th.t_trans_id = 0;
2355
2356	/* no logging allowed when nonblocking or from PF_MEMALLOC */
2357	if (checked && (current->flags & PF_MEMALLOC)) {
2358		redirty_page_for_writepage(wbc, page);
2359		unlock_page(page);
2360		return 0;
2361	}
2362
2363	/* The page dirty bit is cleared before writepage is called, which
2364	 * means we have to tell create_empty_buffers to make dirty buffers.
2365	 * The page really should be up to date at this point, so tossing
2366	 * in the BH_Uptodate is just a sanity check.
2367	 */
2368	if (!page_has_buffers(page)) {
2369		create_empty_buffers(page, s->s_blocksize,
2370				     (1 << BH_Dirty) | (1 << BH_Uptodate));
2371	}
2372	head = page_buffers(page);
2373
2374	/* last page in the file, zero out any contents past the
2375	 ** last byte in the file
2376	 */
2377	if (page->index >= end_index) {
2378		unsigned last_offset;
2379
2380		last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
2381		/* no file contents in this page */
2382		if (page->index >= end_index + 1 || !last_offset) {
2383			unlock_page(page);
2384			return 0;
2385		}
2386		zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
2387	}
2388	bh = head;
2389	block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
2390	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2391	/* first map all the buffers, logging any direct items we find */
2392	do {
2393		if (block > last_block) {
2394			/*
2395			 * This can happen when the block size is less than
2396			 * the page size.  The corresponding bytes in the page
2397			 * were zero filled above
2398			 */
2399			clear_buffer_dirty(bh);
2400			set_buffer_uptodate(bh);
2401		} else if ((checked || buffer_dirty(bh)) &&
2402		           (!buffer_mapped(bh) || (buffer_mapped(bh)
2403						       && bh->b_blocknr ==
2404						       0))) {
2405			/* not mapped yet, or it points to a direct item, search
2406			 * the btree for the mapping info, and log any direct
2407			 * items found
2408			 */
2409			if ((error = map_block_for_writepage(inode, bh, block))) {
2410				goto fail;
2411			}
2412		}
2413		bh = bh->b_this_page;
2414		block++;
2415	} while (bh != head);
2416
2417	/*
2418	 * we start the transaction after map_block_for_writepage,
2419	 * because it can create holes in the file (an unbounded operation).
2420	 * starting it here, we can make a reliable estimate for how many
2421	 * blocks we're going to log
2422	 */
2423	if (checked) {
2424		ClearPageChecked(page);
2425		reiserfs_write_lock(s);
2426		error = journal_begin(&th, s, bh_per_page + 1);
2427		if (error) {
2428			reiserfs_write_unlock(s);
2429			goto fail;
2430		}
2431		reiserfs_update_inode_transaction(inode);
2432	}
2433	/* now go through and lock any dirty buffers on the page */
2434	do {
2435		get_bh(bh);
2436		if (!buffer_mapped(bh))
2437			continue;
2438		if (buffer_mapped(bh) && bh->b_blocknr == 0)
2439			continue;
2440
2441		if (checked) {
2442			reiserfs_prepare_for_journal(s, bh, 1);
2443			journal_mark_dirty(&th, s, bh);
2444			continue;
2445		}
2446		/* from this point on, we know the buffer is mapped to a
2447		 * real block and not a direct item
2448		 */
2449		if (wbc->sync_mode != WB_SYNC_NONE) {
2450			lock_buffer(bh);
2451		} else {
2452			if (!trylock_buffer(bh)) {
2453				redirty_page_for_writepage(wbc, page);
2454				continue;
2455			}
2456		}
2457		if (test_clear_buffer_dirty(bh)) {
2458			mark_buffer_async_write(bh);
2459		} else {
2460			unlock_buffer(bh);
2461		}
2462	} while ((bh = bh->b_this_page) != head);
2463
2464	if (checked) {
2465		error = journal_end(&th, s, bh_per_page + 1);
2466		reiserfs_write_unlock(s);
2467		if (error)
2468			goto fail;
2469	}
2470	BUG_ON(PageWriteback(page));
2471	set_page_writeback(page);
2472	unlock_page(page);
2473
2474	/*
2475	 * since any buffer might be the only dirty buffer on the page,
2476	 * the first submit_bh can bring the page out of writeback.
2477	 * be careful with the buffers.
2478	 */
2479	do {
2480		struct buffer_head *next = bh->b_this_page;
2481		if (buffer_async_write(bh)) {
2482			submit_bh(WRITE, bh);
2483			nr++;
2484		}
2485		put_bh(bh);
2486		bh = next;
2487	} while (bh != head);
2488
2489	error = 0;
2490      done:
2491	if (nr == 0) {
2492		/*
2493		 * if this page only had a direct item, it is very possible for
2494		 * no io to be required without there being an error.  Or,
2495		 * someone else could have locked them and sent them down the
2496		 * pipe without locking the page
2497		 */
2498		bh = head;
2499		do {
2500			if (!buffer_uptodate(bh)) {
2501				partial = 1;
2502				break;
2503			}
2504			bh = bh->b_this_page;
2505		} while (bh != head);
2506		if (!partial)
2507			SetPageUptodate(page);
2508		end_page_writeback(page);
2509	}
2510	return error;
2511
2512      fail:
2513	/* catches various errors; we need to make sure any valid dirty blocks
2514	 * get to the media.  The page is currently locked and not marked for
2515	 * writeback
2516	 */
2517	ClearPageUptodate(page);
2518	bh = head;
2519	do {
2520		get_bh(bh);
2521		if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
2522			lock_buffer(bh);
2523			mark_buffer_async_write(bh);
2524		} else {
2525			/*
2526			 * clear any dirty bits that might have come from getting
2527			 * attached to a dirty page
2528			 */
2529			clear_buffer_dirty(bh);
2530		}
2531		bh = bh->b_this_page;
2532	} while (bh != head);
2533	SetPageError(page);
2534	BUG_ON(PageWriteback(page));
2535	set_page_writeback(page);
2536	unlock_page(page);
2537	do {
2538		struct buffer_head *next = bh->b_this_page;
2539		if (buffer_async_write(bh)) {
2540			clear_buffer_dirty(bh);
2541			submit_bh(WRITE, bh);
2542			nr++;
2543		}
2544		put_bh(bh);
2545		bh = next;
2546	} while (bh != head);
2547	goto done;
2548}
2549
2550static int reiserfs_readpage(struct file *f, struct page *page)
2551{
2552	return block_read_full_page(page, reiserfs_get_block);
2553}
2554
2555static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
2556{
2557	struct inode *inode = page->mapping->host;
2558	reiserfs_wait_on_write_block(inode->i_sb);
2559	return reiserfs_write_full_page(page, wbc);
2560}
2561
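/* a failed write may have instantiated pages and blocks past i_size;
** drop everything beyond the current end of the file
*/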
2562static void reiserfs_truncate_failed_write(struct inode *inode)
2563{
2564	truncate_inode_pages(inode->i_mapping, inode->i_size);
2565	reiserfs_truncate_file(inode, 0);
2566}
2567
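/*
 * ->write_begin for the reiserfs address space: grab and lock the page,
 * unmap any stale tail buffers, and let __block_write_begin map or
 * create the buffers via reiserfs_get_block.  If reiserfs_get_block
 * failed and left a transaction running, it is closed here before the
 * error is returned.
 */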
2568static int reiserfs_write_begin(struct file *file,
2569				struct address_space *mapping,
2570				loff_t pos, unsigned len, unsigned flags,
2571				struct page **pagep, void **fsdata)
2572{
2573	struct inode *inode;
2574	struct page *page;
2575	pgoff_t index;
2576	int ret;
2577	int old_ref = 0;
2578
2579	inode = mapping->host;
2580	*fsdata = 0;
2581	if (flags & AOP_FLAG_CONT_EXPAND &&
2582	    (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
2583		pos++;
2584		*fsdata = (void *)(unsigned long)flags;
2585	}
2586
2587	index = pos >> PAGE_CACHE_SHIFT;
2588	page = grab_cache_page_write_begin(mapping, index, flags);
2589	if (!page)
2590		return -ENOMEM;
2591	*pagep = page;
2592
2593	reiserfs_wait_on_write_block(inode->i_sb);
2594	fix_tail_page_for_writing(page);
2595	if (reiserfs_transaction_running(inode->i_sb)) {
2596		struct reiserfs_transaction_handle *th;
2597		th = (struct reiserfs_transaction_handle *)current->
2598		    journal_info;
2599		BUG_ON(!th->t_refcount);
2600		BUG_ON(!th->t_trans_id);
2601		old_ref = th->t_refcount;
2602		th->t_refcount++;
2603	}
2604	ret = __block_write_begin(page, pos, len, reiserfs_get_block);
2605	if (ret && reiserfs_transaction_running(inode->i_sb)) {
2606		struct reiserfs_transaction_handle *th = current->journal_info;
2607		/* this gets a little ugly.  If reiserfs_get_block returned an
2608		 * error and left a transaction running, we've got to close it,
2609		 * and we've got to free the handle if it was a persistent transaction.
2610		 *
2611		 * But, if we had nested into an existing transaction, we need
2612		 * to just drop the ref count on the handle.
2613		 *
2614		 * If old_ref == 0, the transaction is from reiserfs_get_block,
2615		 * and it was a persistent trans.  Otherwise, it was nested above.
2616		 */
2617		if (th->t_refcount > old_ref) {
2618			if (old_ref)
2619				th->t_refcount--;
2620			else {
2621				int err;
2622				reiserfs_write_lock(inode->i_sb);
2623				err = reiserfs_end_persistent_transaction(th);
2624				reiserfs_write_unlock(inode->i_sb);
2625				if (err)
2626					ret = err;
2627			}
2628		}
2629	}
2630	if (ret) {
2631		unlock_page(page);
2632		page_cache_release(page);
2633		/* Truncate allocated blocks */
2634		reiserfs_truncate_failed_write(inode);
2635	}
2636	return ret;
2637}
2638
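/*
 * same buffer preparation as reiserfs_write_begin, but for callers that
 * already hold a locked page and the write lock (presumably the xattr
 * code); note that the write lock is dropped and retaken around the
 * wait for a pending journal flush
 */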
2639int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
2640{
2641	struct inode *inode = page->mapping->host;
2642	int ret;
2643	int old_ref = 0;
2644
2645	reiserfs_write_unlock(inode->i_sb);
2646	reiserfs_wait_on_write_block(inode->i_sb);
2647	reiserfs_write_lock(inode->i_sb);
2648
2649	fix_tail_page_for_writing(page);
2650	if (reiserfs_transaction_running(inode->i_sb)) {
2651		struct reiserfs_transaction_handle *th;
2652		th = (struct reiserfs_transaction_handle *)current->
2653		    journal_info;
2654		BUG_ON(!th->t_refcount);
2655		BUG_ON(!th->t_trans_id);
2656		old_ref = th->t_refcount;
2657		th->t_refcount++;
2658	}
2659
2660	ret = __block_write_begin(page, from, len, reiserfs_get_block);
2661	if (ret && reiserfs_transaction_running(inode->i_sb)) {
2662		struct reiserfs_transaction_handle *th = current->journal_info;
2663		/* this gets a little ugly.  If reiserfs_get_block returned an
2664		 * error and left a transaction running, we've got to close it,
2665		 * and we've got to free the handle if it was a persistent transaction.
2666		 *
2667		 * But, if we had nested into an existing transaction, we need
2668		 * to just drop the ref count on the handle.
2669		 *
2670		 * If old_ref == 0, the transaction is from reiserfs_get_block,
2671		 * and it was a persistent trans.  Otherwise, it was nested above.
2672		 */
2673		if (th->t_refcount > old_ref) {
2674			if (old_ref)
2675				th->t_refcount--;
2676			else {
2677				int err;
2678				reiserfs_write_lock(inode->i_sb);
2679				err = reiserfs_end_persistent_transaction(th);
2680				reiserfs_write_unlock(inode->i_sb);
2681				if (err)
2682					ret = err;
2683			}
2684		}
2685	}
2686	return ret;
2687
2688}
2689
2690static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
2691{
2692	return generic_block_bmap(as, block, reiserfs_bmap);
2693}
2694
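/*
 * ->write_end: commit the copied bytes into the page's buffers, and if
 * the write extended the file, update i_size and the stat data inside a
 * small transaction so the size change is journaled with the data.
 */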
2695static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2696			      loff_t pos, unsigned len, unsigned copied,
2697			      struct page *page, void *fsdata)
2698{
2699	struct inode *inode = page->mapping->host;
2700	int ret = 0;
2701	int update_sd = 0;
2702	struct reiserfs_transaction_handle *th;
2703	unsigned start;
2704	int lock_depth = 0;
2705	bool locked = false;
2706
2707	if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
2708		pos++;
2709
2710	reiserfs_wait_on_write_block(inode->i_sb);
2711	if (reiserfs_transaction_running(inode->i_sb))
2712		th = current->journal_info;
2713	else
2714		th = NULL;
2715
2716	start = pos & (PAGE_CACHE_SIZE - 1);
2717	if (unlikely(copied < len)) {
2718		if (!PageUptodate(page))
2719			copied = 0;
2720
2721		page_zero_new_buffers(page, start + copied, start + len);
2722	}
2723	flush_dcache_page(page);
2724
2725	reiserfs_commit_page(inode, page, start, start + copied);
2726
2727	/* generic_commit_write does this for us, but does not update the
2728	 ** transaction tracking stuff when the size changes.  So, we have
2729	 ** to do the i_size updates here.
2730	 */
2731	if (pos + copied > inode->i_size) {
2732		struct reiserfs_transaction_handle myth;
2733		lock_depth = reiserfs_write_lock_once(inode->i_sb);
2734		locked = true;
2735		/* If the file has grown beyond the border where it
2736		   can have a tail, unmark it as needing a tail
2737		   packing */
2738		if ((have_large_tails(inode->i_sb)
2739		     && inode->i_size > i_block_size(inode) * 4)
2740		    || (have_small_tails(inode->i_sb)
2741			&& inode->i_size > i_block_size(inode)))
2742			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2743
2744		ret = journal_begin(&myth, inode->i_sb, 1);
2745		if (ret)
2746			goto journal_error;
2747
2748		reiserfs_update_inode_transaction(inode);
2749		inode->i_size = pos + copied;
2750		/*
2751		 * this will just nest into our transaction.  It's important
2752		 * to use mark_inode_dirty so the inode gets pushed around on the
2753		 * dirty lists, and so that O_SYNC works as expected
2754		 */
2755		mark_inode_dirty(inode);
2756		reiserfs_update_sd(&myth, inode);
2757		update_sd = 1;
2758		ret = journal_end(&myth, inode->i_sb, 1);
2759		if (ret)
2760			goto journal_error;
2761	}
2762	if (th) {
2763		if (!locked) {
2764			lock_depth = reiserfs_write_lock_once(inode->i_sb);
2765			locked = true;
2766		}
2767		if (!update_sd)
2768			mark_inode_dirty(inode);
2769		ret = reiserfs_end_persistent_transaction(th);
2770		if (ret)
2771			goto out;
2772	}
2773
2774      out:
2775	if (locked)
2776		reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2777	unlock_page(page);
2778	page_cache_release(page);
2779
2780	if (pos + len > inode->i_size)
2781		reiserfs_truncate_failed_write(inode);
2782
2783	return ret == 0 ? copied : ret;
2784
2785      journal_error:
2786	reiserfs_write_unlock_once(inode->i_sb, lock_depth);
2787	locked = false;
2788	if (th) {
2789		if (!update_sd)
2790			reiserfs_update_sd(th, inode);
2791		ret = reiserfs_end_persistent_transaction(th);
2792	}
2793	goto out;
2794}
2795
2796int reiserfs_commit_write(struct file *f, struct page *page,
2797			  unsigned from, unsigned to)
2798{
2799	struct inode *inode = page->mapping->host;
2800	loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
2801	int ret = 0;
2802	int update_sd = 0;
2803	struct reiserfs_transaction_handle *th = NULL;
2804
2805	reiserfs_write_unlock(inode->i_sb);
2806	reiserfs_wait_on_write_block(inode->i_sb);
2807	reiserfs_write_lock(inode->i_sb);
2808
2809	if (reiserfs_transaction_running(inode->i_sb)) {
2810		th = current->journal_info;
2811	}
2812	reiserfs_commit_page(inode, page, from, to);
2813
2814	/* generic_commit_write does this for us, but does not update the
2815	 ** transaction tracking stuff when the size changes.  So, we have
2816	 ** to do the i_size updates here.
2817	 */
2818	if (pos > inode->i_size) {
2819		struct reiserfs_transaction_handle myth;
2820		/* If the file has grown beyond the border where it
2821		   can have a tail, unmark it as needing a tail
2822		   packing */
2823		if ((have_large_tails(inode->i_sb)
2824		     && inode->i_size > i_block_size(inode) * 4)
2825		    || (have_small_tails(inode->i_sb)
2826			&& inode->i_size > i_block_size(inode)))
2827			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2828
2829		ret = journal_begin(&myth, inode->i_sb, 1);
2830		if (ret)
2831			goto journal_error;
2832
2833		reiserfs_update_inode_transaction(inode);
2834		inode->i_size = pos;
2835		/*
2836		 * this will just nest into our transaction.  It's important
2837		 * to use mark_inode_dirty so the inode gets pushed around on the
2838		 * dirty lists, and so that O_SYNC works as expected
2839		 */
2840		mark_inode_dirty(inode);
2841		reiserfs_update_sd(&myth, inode);
2842		update_sd = 1;
2843		ret = journal_end(&myth, inode->i_sb, 1);
2844		if (ret)
2845			goto journal_error;
2846	}
2847	if (th) {
2848		if (!update_sd)
2849			mark_inode_dirty(inode);
2850		ret = reiserfs_end_persistent_transaction(th);
2851		if (ret)
2852			goto out;
2853	}
2854
2855      out:
2856	return ret;
2857
2858      journal_error:
2859	if (th) {
2860		if (!update_sd)
2861			reiserfs_update_sd(th, inode);
2862		ret = reiserfs_end_persistent_transaction(th);
2863	}
2864
2865	return ret;
2866}
2867
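/*
 * translate the on-disk REISERFS_*_FL attribute bits into the matching
 * in-core inode flags; i_attrs_to_sd_attrs below does the reverse
 */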
2868void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
2869{
2870	if (reiserfs_attrs(inode->i_sb)) {
2871		if (sd_attrs & REISERFS_SYNC_FL)
2872			inode->i_flags |= S_SYNC;
2873		else
2874			inode->i_flags &= ~S_SYNC;
2875		if (sd_attrs & REISERFS_IMMUTABLE_FL)
2876			inode->i_flags |= S_IMMUTABLE;
2877		else
2878			inode->i_flags &= ~S_IMMUTABLE;
2879		if (sd_attrs & REISERFS_APPEND_FL)
2880			inode->i_flags |= S_APPEND;
2881		else
2882			inode->i_flags &= ~S_APPEND;
2883		if (sd_attrs & REISERFS_NOATIME_FL)
2884			inode->i_flags |= S_NOATIME;
2885		else
2886			inode->i_flags &= ~S_NOATIME;
2887		if (sd_attrs & REISERFS_NOTAIL_FL)
2888			REISERFS_I(inode)->i_flags |= i_nopack_mask;
2889		else
2890			REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
2891	}
2892}
2893
2894void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs)
2895{
2896	if (reiserfs_attrs(inode->i_sb)) {
2897		if (inode->i_flags & S_IMMUTABLE)
2898			*sd_attrs |= REISERFS_IMMUTABLE_FL;
2899		else
2900			*sd_attrs &= ~REISERFS_IMMUTABLE_FL;
2901		if (inode->i_flags & S_SYNC)
2902			*sd_attrs |= REISERFS_SYNC_FL;
2903		else
2904			*sd_attrs &= ~REISERFS_SYNC_FL;
2905		if (inode->i_flags & S_NOATIME)
2906			*sd_attrs |= REISERFS_NOATIME_FL;
2907		else
2908			*sd_attrs &= ~REISERFS_NOATIME_FL;
2909		if (REISERFS_I(inode)->i_flags & i_nopack_mask)
2910			*sd_attrs |= REISERFS_NOTAIL_FL;
2911		else
2912			*sd_attrs &= ~REISERFS_NOTAIL_FL;
2913	}
2914}
2915
2916/* decide if this buffer needs to stay around for data logging or ordered
2917** write purposes
2918*/
2919static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
2920{
2921	int ret = 1;
2922	struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
2923
2924	lock_buffer(bh);
2925	spin_lock(&j->j_dirty_buffers_lock);
2926	if (!buffer_mapped(bh)) {
2927		goto free_jh;
2928	}
2929	/* the page is locked, and the only places that log a data buffer
2930	 * also lock the page.
2931	 */
2932	if (reiserfs_file_data_log(inode)) {
2933		/*
2934		 * very conservative, leave the buffer pinned if
2935		 * anyone might need it.
2936		 */
2937		if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
2938			ret = 0;
2939		}
2940	} else  if (buffer_dirty(bh)) {
2941		struct reiserfs_journal_list *jl;
2942		struct reiserfs_jh *jh = bh->b_private;
2943
2944		/* why is this safe?
2945		 * reiserfs_setattr updates i_size in the on disk
2946		 * stat data before allowing vmtruncate to be called.
2947		 *
2948		 * If buffer was put onto the ordered list for this
2949		 * transaction, we know for sure either this transaction
2950		 * or an older one already has updated i_size on disk,
2951		 * and this ordered data won't be referenced in the file
2952		 * if we crash.
2953		 *
2954		 * if the buffer was put onto the ordered list for an older
2955		 * transaction, we need to leave it around
2956		 */
2957		if (jh && (jl = jh->jl)
2958		    && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
2959			ret = 0;
2960	}
2961      free_jh:
2962	if (ret && bh->b_private) {
2963		reiserfs_free_jh(bh);
2964	}
2965	spin_unlock(&j->j_dirty_buffers_lock);
2966	unlock_buffer(bh);
2967	return ret;
2968}
2969
2970/* clm -- taken from fs/buffer.c:block_invalidate_page */
2971static void reiserfs_invalidatepage(struct page *page, unsigned long offset)
2972{
2973	struct buffer_head *head, *bh, *next;
2974	struct inode *inode = page->mapping->host;
2975	unsigned int curr_off = 0;
2976	int ret = 1;
2977
2978	BUG_ON(!PageLocked(page));
2979
2980	if (offset == 0)
2981		ClearPageChecked(page);
2982
2983	if (!page_has_buffers(page))
2984		goto out;
2985
2986	head = page_buffers(page);
2987	bh = head;
2988	do {
2989		unsigned int next_off = curr_off + bh->b_size;
2990		next = bh->b_this_page;
2991
2992		/*
2993		 * is this block fully invalidated?
2994		 */
2995		if (offset <= curr_off) {
2996			if (invalidatepage_can_drop(inode, bh))
2997				reiserfs_unmap_buffer(bh);
2998			else
2999				ret = 0;
3000		}
3001		curr_off = next_off;
3002		bh = next;
3003	} while (bh != head);
3004
3005	/*
3006	 * We release buffers only if the entire page is being invalidated.
3007	 * The get_block cached value has been unconditionally invalidated,
3008	 * so real IO is not possible anymore.
3009	 */
3010	if (!offset && ret) {
3011		ret = try_to_release_page(page, 0);
3012		/* maybe should BUG_ON(!ret); - neilb */
3013	}
3014      out:
3015	return;
3016}
3017
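/*
 * pages of files with data logging enabled are flagged Checked when
 * dirtied, so reiserfs_write_full_page later logs their buffers instead
 * of writing them directly
 */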
3018static int reiserfs_set_page_dirty(struct page *page)
3019{
3020	struct inode *inode = page->mapping->host;
3021	if (reiserfs_file_data_log(inode)) {
3022		SetPageChecked(page);
3023		return __set_page_dirty_nobuffers(page);
3024	}
3025	return __set_page_dirty_buffers(page);
3026}
3027
3028/*
3029 * Returns 1 if the page's buffers were dropped.  The page is locked.
3030 *
3031 * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
3032 * in the buffers at page_buffers(page).
3033 *
3034 * even in -o notail mode, we can't be sure an old mount without -o notail
3035 * didn't create files with tails.
3036 */
3037static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
3038{
3039	struct inode *inode = page->mapping->host;
3040	struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3041	struct buffer_head *head;
3042	struct buffer_head *bh;
3043	int ret = 1;
3044
3045	WARN_ON(PageChecked(page));
3046	spin_lock(&j->j_dirty_buffers_lock);
3047	head = page_buffers(page);
3048	bh = head;
3049	do {
3050		if (bh->b_private) {
3051			if (!buffer_dirty(bh) && !buffer_locked(bh)) {
3052				reiserfs_free_jh(bh);
3053			} else {
3054				ret = 0;
3055				break;
3056			}
3057		}
3058		bh = bh->b_this_page;
3059	} while (bh != head);
3060	if (ret)
3061		ret = try_to_free_buffers(page);
3062	spin_unlock(&j->j_dirty_buffers_lock);
3063	return ret;
3064}
3065
3066/* We thank Mingming Cao for helping us understand in great detail what
3067   to do in this section of the code. */
3068static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
3069				  const struct iovec *iov, loff_t offset,
3070				  unsigned long nr_segs)
3071{
3072	struct file *file = iocb->ki_filp;
3073	struct inode *inode = file->f_mapping->host;
3074	ssize_t ret;
3075
3076	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
3077				  reiserfs_get_blocks_direct_io);
3078
3079	/*
3080	 * In case of error, an extending write may have instantiated a few
3081	 * blocks outside i_size. Trim these off again.
3082	 */
3083	if (unlikely((rw & WRITE) && ret < 0)) {
3084		loff_t isize = i_size_read(inode);
3085		loff_t end = offset + iov_length(iov, nr_segs);
3086
3087		if (end > isize)
3088			vmtruncate(inode, isize);
3089	}
3090
3091	return ret;
3092}
3093
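/*
 * ->setattr: size changes go through vmtruncate, with an extra
 * MAX_NON_LFS cap for old-format (3.5) items, and uid/gid changes
 * transfer quota and update the inode inside a single transaction so
 * the quota information and the stat data stay consistent.
 */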
3094int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3095{
3096	struct inode *inode = dentry->d_inode;
3097	unsigned int ia_valid;
3098	int depth;
3099	int error;
3100
3101	error = inode_change_ok(inode, attr);
3102	if (error)
3103		return error;
3104
3105	/* must be turned off for recursive notify_change calls */
3106	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
3107
3108	depth = reiserfs_write_lock_once(inode->i_sb);
3109	if (is_quota_modification(inode, attr))
3110		dquot_initialize(inode);
3111
3112	if (attr->ia_valid & ATTR_SIZE) {
3113		/* version 2 items will be caught by the s_maxbytes check
3114		 ** done for us in vmtruncate
3115		 */
3116		if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
3117		    attr->ia_size > MAX_NON_LFS) {
3118			error = -EFBIG;
3119			goto out;
3120		}
3121
3122		inode_dio_wait(inode);
3123
3124		/* fill in hole pointers in the expanding truncate case. */
3125		if (attr->ia_size > inode->i_size) {
3126			error = generic_cont_expand_simple(inode, attr->ia_size);
3127			if (REISERFS_I(inode)->i_prealloc_count > 0) {
3128				int err;
3129				struct reiserfs_transaction_handle th;
3130				/* we're changing at most 2 bitmaps, inode + super */
3131				err = journal_begin(&th, inode->i_sb, 4);
3132				if (!err) {
3133					reiserfs_discard_prealloc(&th, inode);
3134					err = journal_end(&th, inode->i_sb, 4);
3135				}
3136				if (err)
3137					error = err;
3138			}
3139			if (error)
3140				goto out;
3141			/*
3142			 * file size is changed, ctime and mtime are
3143			 * to be updated
3144			 */
3145			attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
3146		}
3147	}
3148
3149	if ((((attr->ia_valid & ATTR_UID) && (attr->ia_uid & ~0xffff)) ||
3150	     ((attr->ia_valid & ATTR_GID) && (attr->ia_gid & ~0xffff))) &&
3151	    (get_inode_sd_version(inode) == STAT_DATA_V1)) {
3152		/* stat data of format v3.5 has 16-bit uid and gid */
3153		error = -EINVAL;
3154		goto out;
3155	}
3156
3157	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3158	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3159		struct reiserfs_transaction_handle th;
3160		int jbegin_count =
3161		    2 *
3162		    (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
3163		     REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
3164		    2;
3165
3166		error = reiserfs_chown_xattrs(inode, attr);
3167
3168		if (error)
3169			return error;
3170
3171		/* (user+group)*(old+new) structure - we count quota info and inode write (sb, inode) */
3172		error = journal_begin(&th, inode->i_sb, jbegin_count);
3173		if (error)
3174			goto out;
3175		error = dquot_transfer(inode, attr);
3176		if (error) {
3177			journal_end(&th, inode->i_sb, jbegin_count);
3178			goto out;
3179		}
3180
3181		/* Update corresponding info in inode so that everything is in
3182		 * one transaction */
3183		if (attr->ia_valid & ATTR_UID)
3184			inode->i_uid = attr->ia_uid;
3185		if (attr->ia_valid & ATTR_GID)
3186			inode->i_gid = attr->ia_gid;
3187		mark_inode_dirty(inode);
3188		error = journal_end(&th, inode->i_sb, jbegin_count);
3189		if (error)
3190			goto out;
3191	}
3192
3193	/*
3194	 * Relax the lock here, as it might truncate the
3195	 * inode pages and wait for inode page locks.
3196	 * To release such a page lock, the owner needs the
3197	 * reiserfs lock
3198	 */
3199	reiserfs_write_unlock_once(inode->i_sb, depth);
3200	if ((attr->ia_valid & ATTR_SIZE) &&
3201	    attr->ia_size != i_size_read(inode))
3202		error = vmtruncate(inode, attr->ia_size);
3203
3204	if (!error) {
3205		setattr_copy(inode, attr);
3206		mark_inode_dirty(inode);
3207	}
3208	depth = reiserfs_write_lock_once(inode->i_sb);
3209
3210	if (!error && reiserfs_posixacl(inode->i_sb)) {
3211		if (attr->ia_valid & ATTR_MODE)
3212			error = reiserfs_acl_chmod(inode);
3213	}
3214
3215      out:
3216	reiserfs_write_unlock_once(inode->i_sb, depth);
3217
3218	return error;
3219}
3220
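/*
 * wire the routines above into the page cache; the reiserfs-specific
 * set_page_dirty and invalidatepage hooks are what keep data journaling
 * and ordered writes honest
 */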
3221const struct address_space_operations reiserfs_address_space_operations = {
3222	.writepage = reiserfs_writepage,
3223	.readpage = reiserfs_readpage,
3224	.readpages = reiserfs_readpages,
3225	.releasepage = reiserfs_releasepage,
3226	.invalidatepage = reiserfs_invalidatepage,
3227	.write_begin = reiserfs_write_begin,
3228	.write_end = reiserfs_write_end,
3229	.bmap = reiserfs_aop_bmap,
3230	.direct_IO = reiserfs_direct_IO,
3231	.set_page_dirty = reiserfs_set_page_dirty,
3232};
v5.4
   1/*
   2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
   3 */
   4
   5#include <linux/time.h>
   6#include <linux/fs.h>
   7#include "reiserfs.h"
   8#include "acl.h"
   9#include "xattr.h"
  10#include <linux/exportfs.h>
  11#include <linux/pagemap.h>
  12#include <linux/highmem.h>
  13#include <linux/slab.h>
  14#include <linux/uaccess.h>
  15#include <asm/unaligned.h>
  16#include <linux/buffer_head.h>
  17#include <linux/mpage.h>
  18#include <linux/writeback.h>
  19#include <linux/quotaops.h>
  20#include <linux/swap.h>
  21#include <linux/uio.h>
  22#include <linux/bio.h>
  23
  24int reiserfs_commit_write(struct file *f, struct page *page,
  25			  unsigned from, unsigned to);
  26
  27void reiserfs_evict_inode(struct inode *inode)
  28{
  29	/*
  30	 * We need blocks for transaction + (user+group) quota
  31	 * update (possibly delete)
  32	 */
  33	int jbegin_count =
  34	    JOURNAL_PER_BALANCE_CNT * 2 +
  35	    2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
  36	struct reiserfs_transaction_handle th;
  37	int err;
  38
  39	if (!inode->i_nlink && !is_bad_inode(inode))
  40		dquot_initialize(inode);
  41
  42	truncate_inode_pages_final(&inode->i_data);
  43	if (inode->i_nlink)
  44		goto no_delete;
  45
  46	/*
  47	 * The = 0 happens when we abort creating a new inode
  48	 * for some reason like lack of space..
  49	 * also handles bad_inode case
  50	 */
  51	if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {
  52
  53		reiserfs_delete_xattrs(inode);
  54
  55		reiserfs_write_lock(inode->i_sb);
  56
  57		if (journal_begin(&th, inode->i_sb, jbegin_count))
  58			goto out;
  59		reiserfs_update_inode_transaction(inode);
  60
  61		reiserfs_discard_prealloc(&th, inode);
  62
  63		err = reiserfs_delete_object(&th, inode);
  64
  65		/*
  66		 * Do quota update inside a transaction for journaled quotas.
  67		 * We must do that after delete_object so that quota updates
  68		 * go into the same transaction as stat data deletion
  69		 */
  70		if (!err) {
  71			int depth = reiserfs_write_unlock_nested(inode->i_sb);
  72			dquot_free_inode(inode);
  73			reiserfs_write_lock_nested(inode->i_sb, depth);
  74		}
  75
  76		if (journal_end(&th))
  77			goto out;
  78
  79		/*
  80		 * check return value from reiserfs_delete_object after
  81		 * ending the transaction
  82		 */
  83		if (err)
  84		    goto out;
  85
  86		/*
  87		 * all items of file are deleted, so we can remove
  88		 * "save" link
  89		 * we can't do anything about an error here
  90		 */
  91		remove_save_link(inode, 0 /* not truncate */);
  92out:
  93		reiserfs_write_unlock(inode->i_sb);
  94	} else {
  95		/* no object items are in the tree */
  96		;
  97	}
  98
  99	/* note this must go after the journal_end to prevent deadlock */
 100	clear_inode(inode);
 101
 102	dquot_drop(inode);
 103	inode->i_blocks = 0;
 104	return;
 105
 106no_delete:
 107	clear_inode(inode);
 108	dquot_drop(inode);
 109}
 110
 111static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
 112			  __u32 objectid, loff_t offset, int type, int length)
 113{
 114	key->version = version;
 115
 116	key->on_disk_key.k_dir_id = dirid;
 117	key->on_disk_key.k_objectid = objectid;
 118	set_cpu_key_k_offset(key, offset);
 119	set_cpu_key_k_type(key, type);
 120	key->key_length = length;
 121}
 122
 123/*
 124 * take base of inode_key (it comes from inode always) (dirid, objectid)
 125 * and version from an inode, set offset and type of key
 126 */
 127void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
 128		  int type, int length)
 129{
 130	_make_cpu_key(key, get_inode_item_key_version(inode),
 131		      le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
 132		      le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
 133		      length);
 134}
 135
 136/* when key is 0, do not set version and short key */
 137inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
 138			      int version,
 139			      loff_t offset, int type, int length,
 140			      int entry_count /*or ih_free_space */ )
 141{
 142	if (key) {
 143		ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
 144		ih->ih_key.k_objectid =
 145		    cpu_to_le32(key->on_disk_key.k_objectid);
 146	}
 147	put_ih_version(ih, version);
 148	set_le_ih_k_offset(ih, offset);
 149	set_le_ih_k_type(ih, type);
 150	put_ih_item_len(ih, length);
 151	/*    set_ih_free_space (ih, 0); */
 152	/*
 153	 * for directory items it is entry count, for directs and stat
 154	 * datas - 0xffff, for indirects - 0
 155	 */
 156	put_ih_entry_count(ih, entry_count);
 157}
 158
 159/*
 160 * FIXME: we might cache recently accessed indirect item
 161 * Ugh.  Not too eager for that....
 162 * I cut the code until such time as I see a convincing argument (benchmark).
 163 * I don't want a bloated inode struct..., and I don't like code complexity....
 164 */
 165
 166/*
 167 * cutting the code is fine, since it really isn't in use yet and is easy
 168 * to add back in.  But, Vladimir has a really good idea here.  Think
 169 * about what happens for reading a file.  For each page,
 170 * The VFS layer calls reiserfs_readpage, who searches the tree to find
 171 * an indirect item.  This indirect item has X number of pointers, where
 172 * X is a big number if we've done the block allocation right.  But,
 173 * we only use one or two of these pointers during each call to readpage,
 174 * needlessly researching again later on.
 175 *
 176 * The size of the cache could be dynamic based on the size of the file.
 177 *
 178 * I'd also like to see us cache the location the stat data item, since
 179 * we are needlessly researching for that frequently.
 180 *
 181 * --chris
 182 */
 183
 184/*
 185 * If this page has a file tail in it, and
 186 * it was read in by get_block_create_0, the page data is valid,
 187 * but tail is still sitting in a direct item, and we can't write to
 188 * it.  So, look through this page, and check all the mapped buffers
 189 * to make sure they have valid block numbers.  Any that don't must
 190 * be unmapped, so that __block_write_begin will correctly call
 191 * reiserfs_get_block to convert the tail into an unformatted node
 192 */
 193static inline void fix_tail_page_for_writing(struct page *page)
 194{
 195	struct buffer_head *head, *next, *bh;
 196
 197	if (page && page_has_buffers(page)) {
 198		head = page_buffers(page);
 199		bh = head;
 200		do {
 201			next = bh->b_this_page;
 202			if (buffer_mapped(bh) && bh->b_blocknr == 0) {
 203				reiserfs_unmap_buffer(bh);
 204			}
 205			bh = next;
 206		} while (bh != head);
 207	}
 208}
 209
 210/*
 211 * reiserfs_get_block needs to allocate a block unless that has already
 212 * been done or a non-hole position was found in the indirect item
 213 */
 214static inline int allocation_needed(int retval, b_blocknr_t allocated,
 215				    struct item_head *ih,
 216				    __le32 * item, int pos_in_item)
 217{
 218	if (allocated)
 219		return 0;
 220	if (retval == POSITION_FOUND && is_indirect_le_ih(ih) &&
 221	    get_block_num(item, pos_in_item))
 222		return 0;
 223	return 1;
 224}
 225
 226static inline int indirect_item_found(int retval, struct item_head *ih)
 227{
 228	return (retval == POSITION_FOUND) && is_indirect_le_ih(ih);
 229}
 230
 231static inline void set_block_dev_mapped(struct buffer_head *bh,
 232					b_blocknr_t block, struct inode *inode)
 233{
 234	map_bh(bh, inode->i_sb, block);
 235}
 236
 237/*
 238 * files created with the earlier (3.5) key format cannot be
 239 * larger than 2 GB
 240 */
 241static int file_capable(struct inode *inode, sector_t block)
 242{
 243	/* it is new file. */
 244	if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
 245	    /* old file, but 'block' is inside of 2gb */
 246	    block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
 247		return 1;
 248
 249	return 0;
 250}
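/*
 * Editorial sketch, not part of the original source: the 3.5 key format
 * stores byte offsets in 32 bits, so the largest addressable logical
 * block is 2^31 / blocksize, which is what the shift above computes.
 * With 4KiB blocks that is 1 << (31 - 12) = 524288 blocks, i.e. 2GB.
 * The helper name is hypothetical.
 */
static inline sector_t example_v35_block_limit(struct super_block *sb)
{
	return (sector_t)1 << (31 - sb->s_blocksize_bits);
}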
 251
 252static int restart_transaction(struct reiserfs_transaction_handle *th,
 253			       struct inode *inode, struct treepath *path)
 254{
 255	struct super_block *s = th->t_super;
 256	int err;
 257
 258	BUG_ON(!th->t_trans_id);
 259	BUG_ON(!th->t_refcount);
 260
 261	pathrelse(path);
 262
 263	/* we cannot restart while nested */
 264	if (th->t_refcount > 1) {
 265		return 0;
 266	}
 267	reiserfs_update_sd(th, inode);
 268	err = journal_end(th);
 269	if (!err) {
 270		err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
 271		if (!err)
 272			reiserfs_update_inode_transaction(inode);
 273	}
 274	return err;
 275}
 276
 277/*
 278 * Called by reiserfs_get_block when create == 0. Returns the block
 279 * number for the 'block'-th logical block of the file.
 280 * When it hits a direct item it either returns 0 (when called from
 281 * bmap, which cannot map tails) or reads the direct item into the
 282 * relevant piece of the page (bh_result), since a tail has no
 283 * block of its own to map.
 284 */
 285static int _get_block_create_0(struct inode *inode, sector_t block,
 286			       struct buffer_head *bh_result, int args)
 287{
 288	INITIALIZE_PATH(path);
 289	struct cpu_key key;
 290	struct buffer_head *bh;
 291	struct item_head *ih, tmp_ih;
 292	b_blocknr_t blocknr;
 293	char *p = NULL;
 294	int chars;
 295	int ret;
 296	int result;
 297	int done = 0;
 298	unsigned long offset;
 299
 300	/* prepare the key to look for the 'block'-th block of file */
 301	make_cpu_key(&key, inode,
 302		     (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
 303		     3);
 304
 305	result = search_for_position_by_key(inode->i_sb, &key, &path);
 306	if (result != POSITION_FOUND) {
 307		pathrelse(&path);
 308		if (p)
 309			kunmap(bh_result->b_page);
 310		if (result == IO_ERROR)
 311			return -EIO;
 312		/*
 313		 * We do not return -ENOENT if there is a hole but page is
 314		 * uptodate, because it means that there is some MMAPED data
 315		 * associated with it that is yet to be written to disk.
 316		 */
 317		if ((args & GET_BLOCK_NO_HOLE)
 318		    && !PageUptodate(bh_result->b_page)) {
 319			return -ENOENT;
 320		}
 321		return 0;
 322	}
 323
 324	bh = get_last_bh(&path);
 325	ih = tp_item_head(&path);
 326	if (is_indirect_le_ih(ih)) {
 327		__le32 *ind_item = (__le32 *) ih_item_body(bh, ih);
 328
 329		/*
 330		 * FIXME: here we could cache indirect item or part of it in
 331		 * the inode to avoid search_by_key in case of subsequent
 332		 * access to file
 333		 */
 334		blocknr = get_block_num(ind_item, path.pos_in_item);
 335		ret = 0;
 336		if (blocknr) {
 337			map_bh(bh_result, inode->i_sb, blocknr);
 338			if (path.pos_in_item ==
 339			    ((ih_item_len(ih) / UNFM_P_SIZE) - 1)) {
 340				set_buffer_boundary(bh_result);
 341			}
 342		} else {
 343			/*
 344			 * We do not return -ENOENT if there is a hole but
 345			 * page is uptodate, because it means that there is
 346			 * some MMAPED data associated with it that is
 347			 * yet to be written to disk.
 348			 */
 349			if ((args & GET_BLOCK_NO_HOLE) &&
 350			    !PageUptodate(bh_result->b_page)) {
 351				ret = -ENOENT;
 352			}
 353		}
 354		pathrelse(&path);
 355		if (p)
 356			kunmap(bh_result->b_page);
 357		return ret;
 358	}
 359	/* requested data are in direct item(s) */
 360	if (!(args & GET_BLOCK_READ_DIRECT)) {
 361		/*
 362		 * we are called by bmap. FIXME: we can not map block of file
 363		 * when it is stored in direct item(s)
 364		 */
 365		pathrelse(&path);
 366		if (p)
 367			kunmap(bh_result->b_page);
 368		return -ENOENT;
 369	}
 370
 371	/*
 372	 * if we've got a direct item, and the buffer or page was uptodate,
 373	 * we don't want to pull data off disk again.  skip to the
 374	 * end, where we map the buffer and return
 375	 */
 376	if (buffer_uptodate(bh_result))
 377		goto finished;
 378
 379	/*
 380	 * grab_tail_page can trigger calls to reiserfs_get_block on
 381	 * up-to-date pages without any buffers.  If the page is up
 382	 * to date, we don't want to read old data off disk.  Set the
 383	 * uptodate bit on the buffer instead and jump to the end
 384	 */
 385	if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
 386		set_buffer_uptodate(bh_result);
 387		goto finished;
 388	}
 389	/* read file tail into part of page */
 390	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
 391	copy_item_head(&tmp_ih, ih);
 392
 393	/*
 394	 * we only want to kmap if we are reading the tail into the page.
 395	 * this is not the common case, so we don't kmap until we are
 396	 * sure we need to.  But, this means the item might move if
 397	 * kmap schedules
 398	 */
 399	if (!p)
 400		p = (char *)kmap(bh_result->b_page);
 401
 402	p += offset;
 403	memset(p, 0, inode->i_sb->s_blocksize);
 404	do {
 405		if (!is_direct_le_ih(ih)) {
 406			BUG();
 407		}
 408		/*
 409		 * make sure we don't read more bytes than actually exist in
 410		 * the file.  This can happen in odd cases where i_size isn't
 411		 * correct, and when direct item padding results in a few
 412		 * extra bytes at the end of the direct item
 413		 */
 414		if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
 415			break;
 416		if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
 417			chars =
 418			    inode->i_size - (le_ih_k_offset(ih) - 1) -
 419			    path.pos_in_item;
 420			done = 1;
 421		} else {
 422			chars = ih_item_len(ih) - path.pos_in_item;
 423		}
 424		memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);
 425
 426		if (done)
 427			break;
 428
 429		p += chars;
 430
 431		/*
 432		 * we are done if the direct item we just read is not the
 433		 * last item of the node. FIXME: we could check the right
 434		 * delimiting key to see whether the direct item continues
 435		 * in the right neighbor, or rely on i_size
 436		 */
 437		if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
 438			break;
 439
 440		/* update key to look for the next piece */
 441		set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
 442		result = search_for_position_by_key(inode->i_sb, &key, &path);
 443		if (result != POSITION_FOUND)
 444			/* i/o error most likely */
 445			break;
 446		bh = get_last_bh(&path);
 447		ih = tp_item_head(&path);
 448	} while (1);
 449
 450	flush_dcache_page(bh_result->b_page);
 451	kunmap(bh_result->b_page);
 452
 453finished:
 454	pathrelse(&path);
 455
 456	if (result == IO_ERROR)
 457		return -EIO;
 458
 459	/*
 460	 * this buffer has valid data, but isn't valid for io.  mapping it to
 461	 * block #0 tells the rest of reiserfs it just has a tail in it
 462	 */
 463	map_bh(bh_result, inode->i_sb, 0);
 464	set_buffer_uptodate(bh_result);
 465	return 0;
 466}
 467
 468/*
 469 * this is called to create file map. So, _get_block_create_0 will not
 470 * read direct item
 471 */
 472static int reiserfs_bmap(struct inode *inode, sector_t block,
 473			 struct buffer_head *bh_result, int create)
 474{
 475	if (!file_capable(inode, block))
 476		return -EFBIG;
 477
 478	reiserfs_write_lock(inode->i_sb);
 479	/* do not read the direct item */
 480	_get_block_create_0(inode, block, bh_result, 0);
 481	reiserfs_write_unlock(inode->i_sb);
 482	return 0;
 483}
 484
 485/*
 486 * special version of get_block that is only used by grab_tail_page right
 487 * now.  It is sent to __block_write_begin, and when you try to get a
 488 * block past the end of the file (or a block from a hole) it returns
 489 * -ENOENT instead of a valid buffer.  __block_write_begin expects to
 490 * be able to do i/o on the buffers returned, unless an error value
 491 * is also returned.
 492 *
 493 * So, this allows __block_write_begin to be used for reading a single
 494 * block in a page, without producing a valid mapping for holes or for
 495 * blocks past the end of the file.  This turns out to be exactly what
 496 * we need when reading tails for conversion.
 497 *
 498 * The point of the wrapper is forcing a certain value for create, even
 499 * though the VFS layer is calling this function with create==1.  If you
 500 * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 501 * don't use this function.
 502 */
 503static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
 504				       struct buffer_head *bh_result,
 505				       int create)
 506{
 507	return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
 508}
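/*
 * Editorial sketch, not part of the original source: roughly how a
 * caller such as grab_tail_page() would use the wrapper above with
 * __block_write_begin() to read one block of a page, getting -ENOENT
 * back for holes or blocks past EOF instead of a mapped buffer. The
 * function name and parameters are hypothetical.
 */
static inline int example_read_block_in_page(struct page *page,
					     loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len,
				   reiserfs_get_block_create_0);
}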
 509
 510/*
 511 * This is a special helper for reiserfs_get_block in case we are
 512 * executing a direct_IO request.
 513 */
 514static int reiserfs_get_blocks_direct_io(struct inode *inode,
 515					 sector_t iblock,
 516					 struct buffer_head *bh_result,
 517					 int create)
 518{
 519	int ret;
 520
 521	bh_result->b_page = NULL;
 522
 523	/*
 524	 * We set the b_size before reiserfs_get_block call since it is
 525	 * referenced in convert_tail_for_hole() that may be called from
 526	 * reiserfs_get_block()
 527	 */
 528	bh_result->b_size = i_blocksize(inode);
 529
 530	ret = reiserfs_get_block(inode, iblock, bh_result,
 531				 create | GET_BLOCK_NO_DANGLE);
 532	if (ret)
 533		goto out;
 534
 535	/* don't allow direct io onto tail pages */
 536	if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
 537		/*
 538		 * make sure future calls to the direct io funcs for this
 539		 * offset in the file fail by unmapping the buffer
 540		 */
 541		clear_buffer_mapped(bh_result);
 542		ret = -EINVAL;
 543	}
 544
 545	/*
 546	 * Possible unpacked tail. Flush the data before pages have
 547	 * disappeared
 548	 */
 549	if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
 550		int err;
 551
 552		reiserfs_write_lock(inode->i_sb);
 553
 554		err = reiserfs_commit_for_inode(inode);
 555		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
 556
 557		reiserfs_write_unlock(inode->i_sb);
 558
 559		if (err < 0)
 560			ret = err;
 561	}
 562out:
 563	return ret;
 564}
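/*
 * Editorial note, not part of the original source: reiserfs_direct_IO()
 * (defined later in this file) passes the helper above as the get_block
 * callback to blockdev_direct_IO(), roughly:
 *
 *	ret = blockdev_direct_IO(iocb, inode, iter, offset,
 *				 reiserfs_get_blocks_direct_io);
 *
 * The exact blockdev_direct_IO() signature varies across kernel
 * versions, so the call above is only indicative.
 */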
 565
 566/*
 567 * helper function for when reiserfs_get_block is called for a hole
 568 * but the file tail is still in a direct item
 569 * bh_result is the buffer head for the hole
 570 * tail_offset is the offset of the start of the tail in the file
 571 *
 572 * This calls prepare_write, which will start a new transaction
 573 * you should not be in a transaction, or have any paths held when you
 574 * call this.
 575 */
 576static int convert_tail_for_hole(struct inode *inode,
 577				 struct buffer_head *bh_result,
 578				 loff_t tail_offset)
 579{
 580	unsigned long index;
 581	unsigned long tail_end;
 582	unsigned long tail_start;
 583	struct page *tail_page;
 584	struct page *hole_page = bh_result->b_page;
 585	int retval = 0;
 586
 587	if ((tail_offset & (bh_result->b_size - 1)) != 1)
 588		return -EIO;
 589
 590	/* always try to read until the end of the block */
 591	tail_start = tail_offset & (PAGE_SIZE - 1);
 592	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
 593
 594	index = tail_offset >> PAGE_SHIFT;
 595	/*
 596	 * hole_page can be NULL in the direct_io case; we are sure
 597	 * that we cannot get here when writing with O_DIRECT into a tail page
 598	 */
 599	if (!hole_page || index != hole_page->index) {
 600		tail_page = grab_cache_page(inode->i_mapping, index);
 601		retval = -ENOMEM;
 602		if (!tail_page) {
 603			goto out;
 604		}
 605	} else {
 606		tail_page = hole_page;
 607	}
 608
 609	/*
 610	 * we don't have to make sure the conversion did not happen while
 611	 * we were locking the page because anyone that could convert
 612	 * must first take i_mutex.
 613	 *
 614	 * We must fix the tail page for writing because it might have buffers
 615	 * that are mapped, but have a block number of 0.  This indicates tail
 616	 * data that has been read directly into the page, and
 617	 * __block_write_begin won't trigger a get_block in this case.
 618	 */
 619	fix_tail_page_for_writing(tail_page);
 620	retval = __reiserfs_write_begin(tail_page, tail_start,
 621				      tail_end - tail_start);
 622	if (retval)
 623		goto unlock;
 624
 625	/* tail conversion might change the data in the page */
 626	flush_dcache_page(tail_page);
 627
 628	retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);
 629
 630unlock:
 631	if (tail_page != hole_page) {
 632		unlock_page(tail_page);
 633		put_page(tail_page);
 634	}
 635out:
 636	return retval;
 637}
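/*
 * Editorial worked example, not part of the original source, tracing
 * the rounding in convert_tail_for_hole() above with PAGE_SIZE == 4096
 * and b_size == 1024: a tail at (1-based) byte offset
 * tail_offset == 5121 passes the alignment check (5121 & 1023 == 1),
 * lands in page index == 5121 >> 12 == 1, and yields
 *
 *	tail_start = 5121 & 4095       = 1025
 *	tail_end   = (1025 | 1023) + 1 = 2048
 *
 * so the __reiserfs_write_begin/commit_write pair runs to the end of
 * the 1KiB block holding the tail, matching "always try to read until
 * the end of the block".
 */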
 638
 639static inline int _allocate_block(struct reiserfs_transaction_handle *th,
 640				  sector_t block,
 641				  struct inode *inode,
 642				  b_blocknr_t * allocated_block_nr,
 643				  struct treepath *path, int flags)
 644{
 645	BUG_ON(!th->t_trans_id);
 646
 647#ifdef REISERFS_PREALLOCATE
 648	if (!(flags & GET_BLOCK_NO_IMUX)) {
 649		return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
 650						  path, block);
 651	}
 652#endif
 653	return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
 654					 block);
 655}
 656
 657int reiserfs_get_block(struct inode *inode, sector_t block,
 658		       struct buffer_head *bh_result, int create)
 659{
 660	int repeat, retval = 0;
 661	/* b_blocknr_t is an (unsigned) 32-bit int */
 662	b_blocknr_t allocated_block_nr = 0;
 663	INITIALIZE_PATH(path);
 664	int pos_in_item;
 665	struct cpu_key key;
 666	struct buffer_head *bh, *unbh = NULL;
 667	struct item_head *ih, tmp_ih;
 668	__le32 *item;
 669	int done;
 670	int fs_gen;
 671	struct reiserfs_transaction_handle *th = NULL;
 672	/*
 673	 * space reserved in transaction batch:
 674	 * . 3 balancings in direct->indirect conversion
 675	 * . 1 block involved in reiserfs_update_sd()
 676	 * . quota updates for user and group
 677	 * XXX in the practically impossible worst case, direct2indirect()
 678	 * can incur (much) more than 3 balancings.
 679	 */
 680	int jbegin_count =
 681	    JOURNAL_PER_BALANCE_CNT * 3 + 1 +
 682	    2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
 683	int version;
 684	int dangle = 1;
 685	loff_t new_offset =
 686	    (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
 687
 688	reiserfs_write_lock(inode->i_sb);
 689	version = get_inode_item_key_version(inode);
 690
 691	if (!file_capable(inode, block)) {
 692		reiserfs_write_unlock(inode->i_sb);
 693		return -EFBIG;
 694	}
 695
 696	/*
 697	 * if !create, we aren't changing the FS, so we don't need to
 698	 * log anything, so we don't need to start a transaction
 699	 */
 700	if (!(create & GET_BLOCK_CREATE)) {
 701		int ret;
 702		/* find number of block-th logical block of the file */
 703		ret = _get_block_create_0(inode, block, bh_result,
 704					  create | GET_BLOCK_READ_DIRECT);
 705		reiserfs_write_unlock(inode->i_sb);
 706		return ret;
 707	}
 708
 709	/*
 710	 * if we're already in a transaction, make sure to close
 711	 * any new transactions we start in this func
 712	 */
 713	if ((create & GET_BLOCK_NO_DANGLE) ||
 714	    reiserfs_transaction_running(inode->i_sb))
 715		dangle = 0;
 716
 717	/*
 718	 * If the file is small enough that it might have a tail, and
 719	 * tails are enabled, we should mark it as possibly needing
 720	 * tail packing on close
 721	 */
 722	if ((have_large_tails(inode->i_sb)
 723	     && inode->i_size < i_block_size(inode) * 4)
 724	    || (have_small_tails(inode->i_sb)
 725		&& inode->i_size < i_block_size(inode)))
 726		REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
 727
 728	/* set the key of the first byte in the 'block'-th block of file */
 729	make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ );
 730	if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
 731start_trans:
 732		th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
 733		if (!th) {
 734			retval = -ENOMEM;
 735			goto failure;
 736		}
 737		reiserfs_update_inode_transaction(inode);
 738	}
 739research:
 740
 741	retval = search_for_position_by_key(inode->i_sb, &key, &path);
 742	if (retval == IO_ERROR) {
 743		retval = -EIO;
 744		goto failure;
 745	}
 746
 747	bh = get_last_bh(&path);
 748	ih = tp_item_head(&path);
 749	item = tp_item_body(&path);
 750	pos_in_item = path.pos_in_item;
 751
 752	fs_gen = get_generation(inode->i_sb);
 753	copy_item_head(&tmp_ih, ih);
 754
 755	if (allocation_needed
 756	    (retval, allocated_block_nr, ih, item, pos_in_item)) {
 757		/* we have to allocate block for the unformatted node */
 758		if (!th) {
 759			pathrelse(&path);
 760			goto start_trans;
 761		}
 762
 763		repeat =
 764		    _allocate_block(th, block, inode, &allocated_block_nr,
 765				    &path, create);
 766
 767		/*
 768		 * restart the transaction to give the journal a chance to free
 769		 * some blocks.  This releases the path, so we have to go back to
 770		 * research if we succeed on the second try
 771		 */
 772		if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
 773			SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
 774			retval = restart_transaction(th, inode, &path);
 775			if (retval)
 776				goto failure;
 777			repeat =
 778			    _allocate_block(th, block, inode,
 779					    &allocated_block_nr, NULL, create);
 780
 781			if (repeat != NO_DISK_SPACE && repeat != QUOTA_EXCEEDED) {
 782				goto research;
 783			}
 784			if (repeat == QUOTA_EXCEEDED)
 785				retval = -EDQUOT;
 786			else
 787				retval = -ENOSPC;
 788			goto failure;
 789		}
 790
 791		if (fs_changed(fs_gen, inode->i_sb)
 792		    && item_moved(&tmp_ih, &path)) {
 793			goto research;
 794		}
 795	}
 796
 797	if (indirect_item_found(retval, ih)) {
 798		b_blocknr_t unfm_ptr;
 799		/*
 800		 * 'block'-th block is in the file already (there is
 801		 * corresponding cell in some indirect item). But it may be
 802		 * zero unformatted node pointer (hole)
 803		 */
 804		unfm_ptr = get_block_num(item, pos_in_item);
 805		if (unfm_ptr == 0) {
 806			/* use allocated block to plug the hole */
 807			reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
 808			if (fs_changed(fs_gen, inode->i_sb)
 809			    && item_moved(&tmp_ih, &path)) {
 810				reiserfs_restore_prepared_buffer(inode->i_sb,
 811								 bh);
 812				goto research;
 813			}
 814			set_buffer_new(bh_result);
 815			if (buffer_dirty(bh_result)
 816			    && reiserfs_data_ordered(inode->i_sb))
 817				reiserfs_add_ordered_list(inode, bh_result);
 818			put_block_num(item, pos_in_item, allocated_block_nr);
 819			unfm_ptr = allocated_block_nr;
 820			journal_mark_dirty(th, bh);
 821			reiserfs_update_sd(th, inode);
 822		}
 823		set_block_dev_mapped(bh_result, unfm_ptr, inode);
 824		pathrelse(&path);
 825		retval = 0;
 826		if (!dangle && th)
 827			retval = reiserfs_end_persistent_transaction(th);
 828
 829		reiserfs_write_unlock(inode->i_sb);
 830
 831		/*
 832	 * the item was found, so new blocks were not added to the file;
 833		 * there is no need to make sure the inode is updated with this
 834		 * transaction
 835		 */
 836		return retval;
 837	}
 838
 839	if (!th) {
 840		pathrelse(&path);
 841		goto start_trans;
 842	}
 843
 844	/*
 845	 * desired position is not found or is in the direct item. We have
 846	 * to append the file with holes up to the 'block'-th block,
 847	 * converting direct items to an indirect one if necessary
 848	 */
 849	done = 0;
 850	do {
 851		if (is_statdata_le_ih(ih)) {
 852			__le32 unp = 0;
 853			struct cpu_key tmp_key;
 854
 855			/* indirect item has to be inserted */
 856			make_le_item_head(&tmp_ih, &key, version, 1,
 857					  TYPE_INDIRECT, UNFM_P_SIZE,
 858					  0 /* free_space */ );
 859
 860			/*
 861			 * we are going to add 'block'-th block to the file.
 862			 * Use allocated block for that
 863			 */
 864			if (cpu_key_k_offset(&key) == 1) {
 865				unp = cpu_to_le32(allocated_block_nr);
 866				set_block_dev_mapped(bh_result,
 867						     allocated_block_nr, inode);
 868				set_buffer_new(bh_result);
 869				done = 1;
 870			}
 871			tmp_key = key;	/* ;) */
 872			set_cpu_key_k_offset(&tmp_key, 1);
 873			PATH_LAST_POSITION(&path)++;
 874
 875			retval =
 876			    reiserfs_insert_item(th, &path, &tmp_key, &tmp_ih,
 877						 inode, (char *)&unp);
 878			if (retval) {
 879				reiserfs_free_block(th, inode,
 880						    allocated_block_nr, 1);
 881				/*
 882				 * retval == -ENOSPC, -EDQUOT or -EIO
 883				 * or -EEXIST
 884				 */
 885				goto failure;
 886			}
 887		} else if (is_direct_le_ih(ih)) {
 888			/* direct item has to be converted */
 889			loff_t tail_offset;
 890
 891			tail_offset =
 892			    ((le_ih_k_offset(ih) -
 893			      1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
 894
 895			/*
 896			 * direct item we just found fits into block we have
 897			 * to map. Convert it into unformatted node: use
 898			 * bh_result for the conversion
 899			 */
 900			if (tail_offset == cpu_key_k_offset(&key)) {
 901				set_block_dev_mapped(bh_result,
 902						     allocated_block_nr, inode);
 903				unbh = bh_result;
 904				done = 1;
 905			} else {
 906				/*
 907				 * we have to pad file tail stored in direct
 908				 * item(s) up to block size and convert it
 909				 * to unformatted node. FIXME: this should
 910				 * also get into page cache
 911				 */
 912
 913				pathrelse(&path);
 914				/*
 915				 * ugly, but we can only end the transaction if
 916				 * we aren't nested
 917				 */
 918				BUG_ON(!th->t_refcount);
 919				if (th->t_refcount == 1) {
 920					retval =
 921					    reiserfs_end_persistent_transaction
 922					    (th);
 923					th = NULL;
 924					if (retval)
 925						goto failure;
 926				}
 927
 928				retval =
 929				    convert_tail_for_hole(inode, bh_result,
 930							  tail_offset);
 931				if (retval) {
 932					if (retval != -ENOSPC)
 933						reiserfs_error(inode->i_sb,
 934							"clm-6004",
 935							"convert tail failed "
 936							"inode %lu, error %d",
 937							inode->i_ino,
 938							retval);
 939					if (allocated_block_nr) {
 940						/*
 941						 * the bitmap, the super,
 942						 * and the stat data == 3
 943						 */
 944						if (!th)
 945							th = reiserfs_persistent_transaction(inode->i_sb, 3);
 946						if (th)
 947							reiserfs_free_block(th,
 948									    inode,
 949									    allocated_block_nr,
 950									    1);
 951					}
 952					goto failure;
 953				}
 954				goto research;
 955			}
 956			retval =
 957			    direct2indirect(th, inode, &path, unbh,
 958					    tail_offset);
 959			if (retval) {
 960				reiserfs_unmap_buffer(unbh);
 961				reiserfs_free_block(th, inode,
 962						    allocated_block_nr, 1);
 963				goto failure;
 964			}
 965			/*
 966			 * it is important that set_buffer_uptodate is done
 967			 * after the direct2indirect.  The buffer might
 968			 * contain valid data newer than the data on disk
 969			 * (read by readpage, changed, and then sent here by
 970			 * writepage).  direct2indirect needs to know if unbh
 971			 * was already up to date, so it can decide if the
 972			 * data in unbh needs to be replaced with data from
 973			 * the disk
 974			 */
 975			set_buffer_uptodate(unbh);
 976
 977			/*
 978			 * unbh->b_page == NULL in case of a DIRECT_IO request;
 979			 * the buffer will disappear shortly, so it should
 980			 * not be added to the tail list
 981			 */
 982			if (unbh->b_page) {
 983				/*
 984				 * we've converted the tail, so we must
 985				 * flush unbh before the transaction commits
 986				 */
 987				reiserfs_add_tail_list(inode, unbh);
 988
 989				/*
 990				 * mark it dirty now to prevent commit_write
 991				 * from adding this buffer to the inode's
 992				 * dirty buffer list
 993				 */
 994				/*
 995				 * AKPM: changed __mark_buffer_dirty to
 996				 * mark_buffer_dirty().  It's still atomic,
 997				 * but it sets the page dirty too, which makes
 998				 * it eligible for writeback at any time by the
 999				 * VM (which was also the case with
1000				 * __mark_buffer_dirty())
1001				 */
1002				mark_buffer_dirty(unbh);
1003			}
1004		} else {
1005			/*
1006			 * append indirect item with holes if needed, when
1007			 * appending pointer to 'block'-th block use block,
1008			 * which is already allocated
1009			 */
1010			struct cpu_key tmp_key;
1011			/*
1012			 * We use this in case we need to allocate
1013			 * only one block which is a fastpath
1014			 */
1015			unp_t unf_single = 0;
1016			unp_t *un;
1017			__u64 max_to_insert =
1018			    MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
1019			    UNFM_P_SIZE;
1020			__u64 blocks_needed;
1021
1022			RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
1023			       "vs-804: invalid position for append");
1024			/*
1025			 * indirect item has to be appended,
1026			 * set up key of that position
1027			 * (key type is unimportant)
1028			 */
1029			make_cpu_key(&tmp_key, inode,
1030				     le_key_k_offset(version,
1031						     &ih->ih_key) +
1032				     op_bytes_number(ih,
1033						     inode->i_sb->s_blocksize),
1034				     TYPE_INDIRECT, 3);
1035
1036			RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
1037			       "green-805: invalid offset");
1038			blocks_needed =
1039			    1 +
1040			    ((cpu_key_k_offset(&key) -
1041			      cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
1042			     s_blocksize_bits);
1043
1044			if (blocks_needed == 1) {
1045				un = &unf_single;
1046			} else {
1047				un = kcalloc(min(blocks_needed, max_to_insert),
1048					     UNFM_P_SIZE, GFP_NOFS);
1049				if (!un) {
1050					un = &unf_single;
1051					blocks_needed = 1;
1052					max_to_insert = 0;
1053				}
1054			}
1055			if (blocks_needed <= max_to_insert) {
1056				/*
1057				 * we are going to add target block to
1058				 * the file. Use allocated block for that
1059				 */
1060				un[blocks_needed - 1] =
1061				    cpu_to_le32(allocated_block_nr);
1062				set_block_dev_mapped(bh_result,
1063						     allocated_block_nr, inode);
1064				set_buffer_new(bh_result);
1065				done = 1;
1066			} else {
1067				/* paste hole to the indirect item */
1068				/*
1069				 * If kmalloc failed, max_to_insert becomes
1070				 * If kcalloc failed, max_to_insert becomes
1071				 * one block
1072				 */
1073				blocks_needed =
1074				    max_to_insert ? max_to_insert : 1;
1075			}
1076			retval =
1077			    reiserfs_paste_into_item(th, &path, &tmp_key, inode,
1078						     (char *)un,
1079						     UNFM_P_SIZE *
1080						     blocks_needed);
1081
1082			if (blocks_needed != 1)
1083				kfree(un);
1084
1085			if (retval) {
1086				reiserfs_free_block(th, inode,
1087						    allocated_block_nr, 1);
1088				goto failure;
1089			}
1090			if (!done) {
1091				/*
1092				 * We need to mark new file size in case
1093				 * this function will be interrupted/aborted
1094				 * later on. And we may do this only for
1095				 * holes.
1096				 */
1097				inode->i_size +=
1098				    inode->i_sb->s_blocksize * blocks_needed;
1099			}
1100		}
1101
1102		if (done == 1)
1103			break;
1104
1105		/*
1106		 * this loop could log more blocks than we had originally
1107		 * asked for.  So, we have to allow the transaction to end
1108		 * if it is too big or too full.  Update the inode so things
1109		 * are consistent if we crash before the function returns, and
1110		 * release the path so that anybody waiting on the path before
1111		 * we end the transaction will be able to continue.
1112		 */
1113		if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
1114			retval = restart_transaction(th, inode, &path);
1115			if (retval)
1116				goto failure;
1117		}
1118		/*
1119		 * inserting indirect pointers for a hole can take a
1120		 * long time.  reschedule if needed and also release the write
1121		 * lock for others.
1122		 */
1123		reiserfs_cond_resched(inode->i_sb);
1124
1125		retval = search_for_position_by_key(inode->i_sb, &key, &path);
1126		if (retval == IO_ERROR) {
1127			retval = -EIO;
1128			goto failure;
1129		}
1130		if (retval == POSITION_FOUND) {
1131			reiserfs_warning(inode->i_sb, "vs-825",
1132					 "%K should not be found", &key);
1133			retval = -EEXIST;
1134			if (allocated_block_nr)
1135				reiserfs_free_block(th, inode,
1136						    allocated_block_nr, 1);
1137			pathrelse(&path);
1138			goto failure;
1139		}
1140		bh = get_last_bh(&path);
1141		ih = tp_item_head(&path);
1142		item = tp_item_body(&path);
1143		pos_in_item = path.pos_in_item;
1144	} while (1);
1145
1146	retval = 0;
1147
1148failure:
1149	if (th && (!dangle || (retval && !th->t_trans_id))) {
1150		int err;
1151		if (th->t_trans_id)
1152			reiserfs_update_sd(th, inode);
1153		err = reiserfs_end_persistent_transaction(th);
1154		if (err)
1155			retval = err;
1156	}
1157
1158	reiserfs_write_unlock(inode->i_sb);
1159	reiserfs_check_path(&path);
1160	return retval;
1161}
1162
1163static int
1164reiserfs_readpages(struct file *file, struct address_space *mapping,
1165		   struct list_head *pages, unsigned nr_pages)
1166{
1167	return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
1168}
1169
1170/*
1171 * Compute the real number of bytes used by a file.
1172 * The following three functions can go away when we have enough space
1173 * in the stat data item
1174 */
1175static int real_space_diff(struct inode *inode, int sd_size)
1176{
1177	int bytes;
1178	loff_t blocksize = inode->i_sb->s_blocksize;
1179
1180	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
1181		return sd_size;
1182
1183	/*
1184	 * The end of file also sits in a full block with an indirect
1185	 * reference, so round up to the next block.
1186	 *
1187	 * there is just no way to know if the tail is actually packed
1188	 * on the file, so we have to assume it isn't.  When we pack the
1189	 * tail, we add 4 bytes to pretend there really is an unformatted
1190	 * node pointer
1191	 */
1192	bytes =
1193	    ((inode->i_size +
1194	      (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
1195	    sd_size;
1196	return bytes;
1197}
1198
1199static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
1200					int sd_size)
1201{
1202	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1203		return inode->i_size +
1204		    (loff_t) (real_space_diff(inode, sd_size));
1205	}
1206	return ((loff_t) real_space_diff(inode, sd_size)) +
1207	    (((loff_t) blocks) << 9);
1208}
1209
1210/* Compute number of blocks used by file in ReiserFS counting */
1211static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
1212{
1213	loff_t bytes = inode_get_bytes(inode);
1214	loff_t real_space = real_space_diff(inode, sd_size);
1215
1216	/* keeps fsck and non-quota versions of reiserfs happy */
1217	if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1218		bytes += (loff_t) 511;
1219	}
1220
1221	/*
1222	 * files from before the quota patch might have i_blocks such that
1223	 * bytes < real_space.  Deal with that here to prevent it from
1224	 * going negative.
1225	 */
1226	if (bytes < real_space)
1227		return 0;
1228	return (bytes - real_space) >> 9;
1229}
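/*
 * Editorial worked example, not part of the original source: on a
 * 4KiB-blocksize fs, a 10000-byte regular file spans 3 blocks, so
 * real_space_diff() charges 3 * UNFM_P_SIZE bytes of indirect-pointer
 * overhead plus sd_size for the stat data. to_real_used_space() then
 * reports that overhead plus blocks * 512 bytes of data, and
 * to_fake_used_blocks() inverts the calculation back to 512-byte units
 * for i_blocks/st_blocks. The numbers are illustrative only.
 */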
1230
1231/*
1232 * BAD: new directories have stat data of new type and all other items
1233 * of old type. The version stored in the inode describes the body
1234 * items, so in update_stat_data we cannot rely on the inode but have
1235 * to check the item version directly
1236 */
1237
1238/* called by read_locked_inode */
1239static void init_inode(struct inode *inode, struct treepath *path)
1240{
1241	struct buffer_head *bh;
1242	struct item_head *ih;
1243	__u32 rdev;
1244
1245	bh = PATH_PLAST_BUFFER(path);
1246	ih = tp_item_head(path);
1247
1248	copy_key(INODE_PKEY(inode), &ih->ih_key);
1249
1250	INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
1251	REISERFS_I(inode)->i_flags = 0;
1252	REISERFS_I(inode)->i_prealloc_block = 0;
1253	REISERFS_I(inode)->i_prealloc_count = 0;
1254	REISERFS_I(inode)->i_trans_id = 0;
1255	REISERFS_I(inode)->i_jl = NULL;
1256	reiserfs_init_xattr_rwsem(inode);
1257
1258	if (stat_data_v1(ih)) {
1259		struct stat_data_v1 *sd =
1260		    (struct stat_data_v1 *)ih_item_body(bh, ih);
1261		unsigned long blocks;
1262
1263		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1264		set_inode_sd_version(inode, STAT_DATA_V1);
1265		inode->i_mode = sd_v1_mode(sd);
1266		set_nlink(inode, sd_v1_nlink(sd));
1267		i_uid_write(inode, sd_v1_uid(sd));
1268		i_gid_write(inode, sd_v1_gid(sd));
1269		inode->i_size = sd_v1_size(sd);
1270		inode->i_atime.tv_sec = sd_v1_atime(sd);
1271		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
1272		inode->i_ctime.tv_sec = sd_v1_ctime(sd);
1273		inode->i_atime.tv_nsec = 0;
1274		inode->i_ctime.tv_nsec = 0;
1275		inode->i_mtime.tv_nsec = 0;
1276
1277		inode->i_blocks = sd_v1_blocks(sd);
1278		inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1279		blocks = (inode->i_size + 511) >> 9;
1280		blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
1281
1282		/*
1283		 * there was a bug in <=3.5.23 when i_blocks could take
1284		 * negative values. Starting from 3.5.17 this value could
1285		 * even be stored in stat data. For such files we set
1286		 * i_blocks based on file size. Two caveats: this can be
1287		 * wrong for sparse files, and the on-disk value will only
1288		 * be updated if the file's inode ever changes
1289		 */
1290		if (inode->i_blocks > blocks) {
1291			inode->i_blocks = blocks;
1292		}
1293
1294		rdev = sd_v1_rdev(sd);
1295		REISERFS_I(inode)->i_first_direct_byte =
1296		    sd_v1_first_direct_byte(sd);
1297
1298		/*
1299		 * an early bug in the quota code can give us an odd
1300		 * number for the block count.  This is incorrect, fix it here.
1301		 */
1302		if (inode->i_blocks & 1) {
1303			inode->i_blocks++;
1304		}
1305		inode_set_bytes(inode,
1306				to_real_used_space(inode, inode->i_blocks,
1307						   SD_V1_SIZE));
1308		/*
1309		 * nopack is initially zero for v1 objects. For v2 objects,
1310		 * nopack is initialised from sd_attrs
1311		 */
1312		REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
1313	} else {
1314		/*
1315		 * new stat data found, but object may have old items
1316		 * (directories and symlinks)
1317		 */
1318		struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);
1319
1320		inode->i_mode = sd_v2_mode(sd);
1321		set_nlink(inode, sd_v2_nlink(sd));
1322		i_uid_write(inode, sd_v2_uid(sd));
1323		inode->i_size = sd_v2_size(sd);
1324		i_gid_write(inode, sd_v2_gid(sd));
1325		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
1326		inode->i_atime.tv_sec = sd_v2_atime(sd);
1327		inode->i_ctime.tv_sec = sd_v2_ctime(sd);
1328		inode->i_ctime.tv_nsec = 0;
1329		inode->i_mtime.tv_nsec = 0;
1330		inode->i_atime.tv_nsec = 0;
1331		inode->i_blocks = sd_v2_blocks(sd);
1332		rdev = sd_v2_rdev(sd);
1333		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1334			inode->i_generation =
1335			    le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1336		else
1337			inode->i_generation = sd_v2_generation(sd);
1338
1339		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1340			set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1341		else
1342			set_inode_item_key_version(inode, KEY_FORMAT_3_6);
1343		REISERFS_I(inode)->i_first_direct_byte = 0;
1344		set_inode_sd_version(inode, STAT_DATA_V2);
1345		inode_set_bytes(inode,
1346				to_real_used_space(inode, inode->i_blocks,
1347						   SD_V2_SIZE));
1348		/*
1349		 * read persistent inode attributes from sd and initialise
1350		 * generic inode flags from them
1351		 */
1352		REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
1353		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
1354	}
1355
1356	pathrelse(path);
1357	if (S_ISREG(inode->i_mode)) {
1358		inode->i_op = &reiserfs_file_inode_operations;
1359		inode->i_fop = &reiserfs_file_operations;
1360		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1361	} else if (S_ISDIR(inode->i_mode)) {
1362		inode->i_op = &reiserfs_dir_inode_operations;
1363		inode->i_fop = &reiserfs_dir_operations;
1364	} else if (S_ISLNK(inode->i_mode)) {
1365		inode->i_op = &reiserfs_symlink_inode_operations;
1366		inode_nohighmem(inode);
1367		inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1368	} else {
1369		inode->i_blocks = 0;
1370		inode->i_op = &reiserfs_special_inode_operations;
1371		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
1372	}
1373}
1374
1375/* update new stat data with inode fields */
1376static void inode2sd(void *sd, struct inode *inode, loff_t size)
1377{
1378	struct stat_data *sd_v2 = (struct stat_data *)sd;
1379
1380	set_sd_v2_mode(sd_v2, inode->i_mode);
1381	set_sd_v2_nlink(sd_v2, inode->i_nlink);
1382	set_sd_v2_uid(sd_v2, i_uid_read(inode));
1383	set_sd_v2_size(sd_v2, size);
1384	set_sd_v2_gid(sd_v2, i_gid_read(inode));
1385	set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
1386	set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
1387	set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
1388	set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
1389	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1390		set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
1391	else
1392		set_sd_v2_generation(sd_v2, inode->i_generation);
1393	set_sd_v2_attrs(sd_v2, REISERFS_I(inode)->i_attrs);
1394}
1395
1396/* used to copy inode's fields to old stat data */
1397static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
1398{
1399	struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
1400
1401	set_sd_v1_mode(sd_v1, inode->i_mode);
1402	set_sd_v1_uid(sd_v1, i_uid_read(inode));
1403	set_sd_v1_gid(sd_v1, i_gid_read(inode));
1404	set_sd_v1_nlink(sd_v1, inode->i_nlink);
1405	set_sd_v1_size(sd_v1, size);
1406	set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
1407	set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
1408	set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
1409
1410	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1411		set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
1412	else
1413		set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
1414
1415	/* Sigh. i_first_direct_byte is back */
1416	set_sd_v1_first_direct_byte(sd_v1,
1417				    REISERFS_I(inode)->i_first_direct_byte);
1418}
1419
1420/*
1421 * NOTE, you must prepare the buffer head before sending it here,
1422 * and then log it after the call
1423 */
1424static void update_stat_data(struct treepath *path, struct inode *inode,
1425			     loff_t size)
1426{
1427	struct buffer_head *bh;
1428	struct item_head *ih;
1429
1430	bh = PATH_PLAST_BUFFER(path);
1431	ih = tp_item_head(path);
1432
1433	if (!is_statdata_le_ih(ih))
1434		reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
1435			       INODE_PKEY(inode), ih);
1436
1437	/* path points to old stat data */
1438	if (stat_data_v1(ih)) {
1439		inode2sd_v1(ih_item_body(bh, ih), inode, size);
1440	} else {
1441		inode2sd(ih_item_body(bh, ih), inode, size);
1442	}
1443
1444	return;
1445}
1446
1447void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
1448			     struct inode *inode, loff_t size)
1449{
1450	struct cpu_key key;
1451	INITIALIZE_PATH(path);
1452	struct buffer_head *bh;
1453	int fs_gen;
1454	struct item_head *ih, tmp_ih;
1455	int retval;
1456
1457	BUG_ON(!th->t_trans_id);
1458
1459	/* key type is unimportant */
1460	make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);
1461
1462	for (;;) {
1463		int pos;
1464		/* look for the object's stat data */
1465		retval = search_item(inode->i_sb, &key, &path);
1466		if (retval == IO_ERROR) {
1467			reiserfs_error(inode->i_sb, "vs-13050",
1468				       "i/o failure occurred trying to "
1469				       "update %K stat data", &key);
1470			return;
1471		}
1472		if (retval == ITEM_NOT_FOUND) {
1473			pos = PATH_LAST_POSITION(&path);
1474			pathrelse(&path);
1475			if (inode->i_nlink == 0) {
1476				/*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
1477				return;
1478			}
1479			reiserfs_warning(inode->i_sb, "vs-13060",
1480					 "stat data of object %k (nlink == %d) "
1481					 "not found (pos %d)",
1482					 INODE_PKEY(inode), inode->i_nlink,
1483					 pos);
1484			reiserfs_check_path(&path);
1485			return;
1486		}
1487
1488		/*
1489		 * sigh, prepare_for_journal might schedule.  When it
1490		 * schedules the FS might change.  We have to detect that,
1491		 * and loop back to the search if the stat data item has moved
1492		 */
1493		bh = get_last_bh(&path);
1494		ih = tp_item_head(&path);
1495		copy_item_head(&tmp_ih, ih);
1496		fs_gen = get_generation(inode->i_sb);
1497		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
1498
1499		/* Stat_data item has been moved after scheduling. */
1500		if (fs_changed(fs_gen, inode->i_sb)
1501		    && item_moved(&tmp_ih, &path)) {
1502			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
1503			continue;
1504		}
1505		break;
1506	}
1507	update_stat_data(&path, inode, size);
1508	journal_mark_dirty(th, bh);
1509	pathrelse(&path);
1510	return;
1511}
1512
1513/*
1514 * reiserfs_read_locked_inode is called to read the inode off disk, and it
1515 * does a make_bad_inode when things go wrong.  But, we need to make
1516 * sure to clear the key in the private portion of the inode, otherwise a
1517 * corresponding iput might try to delete whatever object the inode last
1518 * represented.
1519 */
1520static void reiserfs_make_bad_inode(struct inode *inode)
1521{
1522	memset(INODE_PKEY(inode), 0, KEY_SIZE);
1523	make_bad_inode(inode);
1524}
1525
1526/*
1527 * initially this function was derived from minix or ext2's analog and
1528 * evolved as the prototype did
1529 */
1530int reiserfs_init_locked_inode(struct inode *inode, void *p)
1531{
1532	struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
1533	inode->i_ino = args->objectid;
1534	INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
1535	return 0;
1536}
1537
1538/*
1539 * looks for stat data in the tree, and fills up the fields of in-core
1540 * inode stat data fields
1541 */
1542void reiserfs_read_locked_inode(struct inode *inode,
1543				struct reiserfs_iget_args *args)
1544{
1545	INITIALIZE_PATH(path_to_sd);
1546	struct cpu_key key;
1547	unsigned long dirino;
1548	int retval;
1549
1550	dirino = args->dirid;
1551
1552	/*
1553	 * set version 1; version 2 could be used too, because the stat data
1554	 * key is the same in both versions
1555	 */
1556	key.version = KEY_FORMAT_3_5;
1557	key.on_disk_key.k_dir_id = dirino;
1558	key.on_disk_key.k_objectid = inode->i_ino;
1559	key.on_disk_key.k_offset = 0;
1560	key.on_disk_key.k_type = 0;
1561
1562	/* look for the object's stat data */
1563	retval = search_item(inode->i_sb, &key, &path_to_sd);
1564	if (retval == IO_ERROR) {
1565		reiserfs_error(inode->i_sb, "vs-13070",
1566			       "i/o failure occurred trying to find "
1567			       "stat data of %K", &key);
1568		reiserfs_make_bad_inode(inode);
1569		return;
1570	}
1571
1572	/* a stale NFS handle can trigger this without it being an error */
1573	if (retval != ITEM_FOUND) {
1574		pathrelse(&path_to_sd);
1575		reiserfs_make_bad_inode(inode);
1576		clear_nlink(inode);
1577		return;
1578	}
1579
1580	init_inode(inode, &path_to_sd);
1581
1582	/*
1583	 * It is possible that knfsd is trying to access inode of a file
1584	 * that is being removed from the disk by some other thread. As we
1585	 * update sd on unlink all that is required is to check for nlink
1586	 * here. This bug was first found by Sizif when debugging
1587	 * SquidNG/Butterfly, forgotten, and found again after Philippe
1588	 * Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
1589	 *
1590	 * A more logical fix would require changes in fs/inode.c:iput() to
1591	 * remove inode from hash-table _after_ fs cleaned disk stuff up and
1592	 * in iget() to return NULL if I_FREEING inode is found in
1593	 * hash-table.
1594	 */
1595
1596	/*
1597	 * Currently there is one place where it's ok to meet an inode with
1598	 * nlink==0: processing of open-unlinked and half-truncated files
1599	 * during mount (fs/reiserfs/super.c:finish_unfinished()).
1600	 */
1601	if ((inode->i_nlink == 0) &&
1602	    !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
1603		reiserfs_warning(inode->i_sb, "vs-13075",
1604				 "dead inode read from disk %K. "
1605				 "This is likely to be a race with knfsd. Ignore it",
1606				 &key);
1607		reiserfs_make_bad_inode(inode);
1608	}
1609
1610	/* init_inode should have released the path */
1611	reiserfs_check_path(&path_to_sd);
1612
1613	/*
1614	 * Stat data v1 doesn't support ACLs.
1615	 */
1616	if (get_inode_sd_version(inode) == STAT_DATA_V1)
1617		cache_no_acl(inode);
1618}
1619
1620/*
1621 * reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
1622 *
1623 * @inode:    inode from hash table to check
1624 * @opaque:   "cookie" passed to iget5_locked(). This is &reiserfs_iget_args.
1625 *
1626 * This function is called by iget5_locked() to distinguish reiserfs inodes
1627 * having the same inode numbers. Such inodes can only exist due to some
1628 * error condition. One of them should be bad. Inodes with identical
1629 * inode numbers (objectids) are distinguished by parent directory ids.
1630 *
1631 */
1632int reiserfs_find_actor(struct inode *inode, void *opaque)
1633{
1634	struct reiserfs_iget_args *args;
1635
1636	args = opaque;
1637	/* args is already in CPU order */
1638	return (inode->i_ino == args->objectid) &&
1639	    (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
1640}
1641
1642struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1643{
1644	struct inode *inode;
1645	struct reiserfs_iget_args args;
1646	int depth;
1647
1648	args.objectid = key->on_disk_key.k_objectid;
1649	args.dirid = key->on_disk_key.k_dir_id;
1650	depth = reiserfs_write_unlock_nested(s);
1651	inode = iget5_locked(s, key->on_disk_key.k_objectid,
1652			     reiserfs_find_actor, reiserfs_init_locked_inode,
1653			     (void *)(&args));
1654	reiserfs_write_lock_nested(s, depth);
1655	if (!inode)
1656		return ERR_PTR(-ENOMEM);
1657
1658	if (inode->i_state & I_NEW) {
1659		reiserfs_read_locked_inode(inode, &args);
1660		unlock_new_inode(inode);
1661	}
1662
1663	if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
1664		/* either due to i/o error or a stale NFS handle */
1665		iput(inode);
1666		inode = NULL;
1667	}
1668	return inode;
1669}
1670
1671static struct dentry *reiserfs_get_dentry(struct super_block *sb,
1672	u32 objectid, u32 dir_id, u32 generation)
1673
1674{
1675	struct cpu_key key;
1676	struct inode *inode;
1677
1678	key.on_disk_key.k_objectid = objectid;
1679	key.on_disk_key.k_dir_id = dir_id;
1680	reiserfs_write_lock(sb);
1681	inode = reiserfs_iget(sb, &key);
1682	if (inode && !IS_ERR(inode) && generation != 0 &&
1683	    generation != inode->i_generation) {
1684		iput(inode);
1685		inode = NULL;
1686	}
1687	reiserfs_write_unlock(sb);
1688
1689	return d_obtain_alias(inode);
1690}
1691
1692struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1693		int fh_len, int fh_type)
1694{
1695	/*
1696	 * fhtype happens to reflect the number of u32s encoded.
1697	 * due to a bug in earlier code, fhtype might indicate there
1698	 * are more u32s than actually fit.
1699	 * so if fhtype seems to be more than len, reduce fhtype.
1700	 * Valid types are:
1701	 *   2 - objectid + dir_id - legacy support
1702	 *   3 - objectid + dir_id + generation
1703	 *   4 - objectid + dir_id + objectid and dirid of parent - legacy
1704	 *   5 - objectid + dir_id + generation + objectid and dirid of parent
1705	 *   6 - as above plus generation of directory
1706	 * 6 does not fit in NFSv2 handles
1707	 */
1708	if (fh_type > fh_len) {
1709		if (fh_type != 6 || fh_len != 5)
1710			reiserfs_warning(sb, "reiserfs-13077",
1711				"nfsd/reiserfs, fhtype=%d, len=%d - odd",
1712				fh_type, fh_len);
1713		fh_type = fh_len;
1714	}
1715	if (fh_len < 2)
1716		return NULL;
1717
1718	return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
1719		(fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
1720}
1721
1722struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1723		int fh_len, int fh_type)
1724{
1725	if (fh_type > fh_len)
1726		fh_type = fh_len;
1727	if (fh_type < 4)
1728		return NULL;
1729
1730	return reiserfs_get_dentry(sb,
1731		(fh_type >= 5) ? fid->raw[3] : fid->raw[2],
1732		(fh_type >= 5) ? fid->raw[4] : fid->raw[3],
1733		(fh_type == 6) ? fid->raw[5] : 0);
1734}
1735
1736int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
1737		       struct inode *parent)
1738{
1739	int maxlen = *lenp;
1740
1741	if (parent && (maxlen < 5)) {
1742		*lenp = 5;
1743		return FILEID_INVALID;
1744	} else if (maxlen < 3) {
1745		*lenp = 3;
1746		return FILEID_INVALID;
1747	}
1748
1749	data[0] = inode->i_ino;
1750	data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1751	data[2] = inode->i_generation;
1752	*lenp = 3;
1753	if (parent) {
1754		data[3] = parent->i_ino;
1755		data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
1756		*lenp = 5;
1757		if (maxlen >= 6) {
1758			data[5] = parent->i_generation;
1759			*lenp = 6;
1760		}
1761	}
1762	return *lenp;
1763}
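/*
 * Editorial sketch, not part of the original source: the u32 layout
 * reiserfs_encode_fh() above produces for the handle types it emits
 * (the legacy type-4 layout is handled only on the decode side, in
 * reiserfs_fh_to_parent() above):
 *
 *	type 3: raw[0] objectid, raw[1] dir_id, raw[2] generation
 *	type 5: type 3 plus raw[3] parent objectid, raw[4] parent dir_id
 *	type 6: type 5 plus raw[5] parent generation
 */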
1764
1765/*
1766 * looks for stat data, then copies fields to it, marks the buffer
1767 * containing stat data as dirty
1768 */
1769/*
1770 * reiserfs inodes are never really dirty, since the dirty inode call
1771 * always logs them.  This call allows the VFS inode marking routines
1772 * to properly mark inodes for datasync and such, but only actually
1773 * does something when called for a synchronous update.
1774 */
1775int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1776{
1777	struct reiserfs_transaction_handle th;
1778	int jbegin_count = 1;
1779
1780	if (sb_rdonly(inode->i_sb))
1781		return -EROFS;
1782	/*
1783	 * memory pressure can sometimes initiate write_inode calls with
1784	 * sync == 1;
1785	 * these cases are just when the system needs ram, not when the
1786	 * inode needs to reach disk for safety, and they can safely be
1787	 * ignored because the altered inode has already been logged.
1788	 */
1789	if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
1790		reiserfs_write_lock(inode->i_sb);
1791		if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
1792			reiserfs_update_sd(&th, inode);
1793			journal_end_sync(&th);
1794		}
1795		reiserfs_write_unlock(inode->i_sb);
1796	}
1797	return 0;
1798}
1799
1800/*
1801 * stat data of new object is inserted already, this inserts the item
1802 * containing "." and ".." entries
1803 */
1804static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
1805				  struct inode *inode,
1806				  struct item_head *ih, struct treepath *path,
1807				  struct inode *dir)
1808{
1809	struct super_block *sb = th->t_super;
1810	char empty_dir[EMPTY_DIR_SIZE];
1811	char *body = empty_dir;
1812	struct cpu_key key;
1813	int retval;
1814
1815	BUG_ON(!th->t_trans_id);
1816
1817	_make_cpu_key(&key, KEY_FORMAT_3_5, le32_to_cpu(ih->ih_key.k_dir_id),
1818		      le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
1819		      TYPE_DIRENTRY, 3 /*key length */ );
1820
1821	/*
1822	 * compose item head for new item. Directories consist of items of
1823	 * old type (ITEM_VERSION_1). Do not set key (second arg is NULL); it
1824	 * is done by reiserfs_new_inode
1825	 */
1826	if (old_format_only(sb)) {
1827		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1828				  TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
1829
1830		make_empty_dir_item_v1(body, ih->ih_key.k_dir_id,
1831				       ih->ih_key.k_objectid,
1832				       INODE_PKEY(dir)->k_dir_id,
1833				       INODE_PKEY(dir)->k_objectid);
1834	} else {
1835		make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
1836				  TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
1837
1838		make_empty_dir_item(body, ih->ih_key.k_dir_id,
1839				    ih->ih_key.k_objectid,
1840				    INODE_PKEY(dir)->k_dir_id,
1841				    INODE_PKEY(dir)->k_objectid);
1842	}
1843
1844	/* look for place in the tree for new item */
1845	retval = search_item(sb, &key, path);
1846	if (retval == IO_ERROR) {
1847		reiserfs_error(sb, "vs-13080",
1848			       "i/o failure occurred creating new directory");
1849		return -EIO;
1850	}
1851	if (retval == ITEM_FOUND) {
1852		pathrelse(path);
1853		reiserfs_warning(sb, "vs-13070",
1854				 "object with this key exists (%k)",
1855				 &(ih->ih_key));
1856		return -EEXIST;
1857	}
1858
1859	/* insert the item, that is, the empty directory item */
1860	return reiserfs_insert_item(th, path, &key, ih, inode, body);
1861}
1862
1863/*
1864 * stat data of object has been inserted, this inserts the item
1865 * containing the body of symlink
1866 */
1867static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
1868				struct inode *inode,
1869				struct item_head *ih,
1870				struct treepath *path, const char *symname,
1871				int item_len)
1872{
1873	struct super_block *sb = th->t_super;
1874	struct cpu_key key;
1875	int retval;
1876
1877	BUG_ON(!th->t_trans_id);
1878
1879	_make_cpu_key(&key, KEY_FORMAT_3_5,
1880		      le32_to_cpu(ih->ih_key.k_dir_id),
1881		      le32_to_cpu(ih->ih_key.k_objectid),
1882		      1, TYPE_DIRECT, 3 /*key length */ );
1883
1884	make_le_item_head(ih, NULL, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len,
1885			  0 /*free_space */ );
1886
1887	/* look for place in the tree for new item */
1888	retval = search_item(sb, &key, path);
1889	if (retval == IO_ERROR) {
1890		reiserfs_error(sb, "vs-13080",
1891			       "i/o failure occurred creating new symlink");
1892		return -EIO;
1893	}
1894	if (retval == ITEM_FOUND) {
1895		pathrelse(path);
1896		reiserfs_warning(sb, "vs-13080",
1897				 "object with this key exists (%k)",
1898				 &(ih->ih_key));
1899		return -EEXIST;
1900	}
1901
1902	/* insert the item, that is, the body of the symlink */
1903	return reiserfs_insert_item(th, path, &key, ih, inode, symname);
1904}
1905
1906/*
1907 * inserts the stat data into the tree, and then calls
1908 * reiserfs_new_directory (to insert ".", ".." item if new object is
1909 * directory) or reiserfs_new_symlink (to insert symlink body if new
1910 * object is symlink) or nothing (if new object is regular file)
1911 *
1912 * NOTE! uid and gid must already be set in the inode.  If we return
1913 * non-zero due to an error, we have to drop the quota previously allocated
1914 * for the fresh inode.  This can only be done outside a transaction, so
1915 * if we return non-zero, we also end the transaction.
1916 *
1917 * @th: active transaction handle
1918 * @dir: parent directory for new inode
1919 * @mode: mode of new inode
1920 * @symname: symlink contents if inode is symlink
1921 * @isize: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
1922 *         symlinks
1923 * @inode: inode to be filled
1924 * @security: optional security context to associate with this inode
1925 */
1926int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1927		       struct inode *dir, umode_t mode, const char *symname,
1928		       /* 0 for regular, EMPTY_DIR_SIZE for dirs,
1929		          strlen (symname) for symlinks) */
1930		       loff_t i_size, struct dentry *dentry,
1931		       struct inode *inode,
1932		       struct reiserfs_security_handle *security)
1933{
1934	struct super_block *sb = dir->i_sb;
1935	struct reiserfs_iget_args args;
1936	INITIALIZE_PATH(path_to_key);
1937	struct cpu_key key;
1938	struct item_head ih;
1939	struct stat_data sd;
1940	int retval;
1941	int err;
1942	int depth;
1943
1944	BUG_ON(!th->t_trans_id);
1945
1946	depth = reiserfs_write_unlock_nested(sb);
1947	err = dquot_alloc_inode(inode);
1948	reiserfs_write_lock_nested(sb, depth);
1949	if (err)
1950		goto out_end_trans;
1951	if (!dir->i_nlink) {
1952		err = -EPERM;
1953		goto out_bad_inode;
1954	}
1955
1956	/* item head of new item */
1957	ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
1958	ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
1959	if (!ih.ih_key.k_objectid) {
1960		err = -ENOMEM;
1961		goto out_bad_inode;
1962	}
1963	args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
1964	if (old_format_only(sb))
1965		make_le_item_head(&ih, NULL, KEY_FORMAT_3_5, SD_OFFSET,
1966				  TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
1967	else
1968		make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
1969				  TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
1970	memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
1971	args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
1972
1973	depth = reiserfs_write_unlock_nested(inode->i_sb);
1974	err = insert_inode_locked4(inode, args.objectid,
1975			     reiserfs_find_actor, &args);
1976	reiserfs_write_lock_nested(inode->i_sb, depth);
1977	if (err) {
1978		err = -EINVAL;
1979		goto out_bad_inode;
1980	}
1981
1982	if (old_format_only(sb))
1983		/*
1984		 * not a perfect generation count, as object ids can be reused,
1985		 * but this is as good as reiserfs can do right now.
1986		 * note that the private part of inode isn't filled in yet,
1987		 * we have to use the directory.
1988		 */
1989		inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
1990	else
1991#if defined(USE_INODE_GENERATION_COUNTER)
1992		inode->i_generation =
1993		    le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
1994#else
1995		inode->i_generation = ++event;
1996#endif
1997
1998	/* fill stat data */
1999	set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));
2000
2001	/* uid and gid must already be set by the caller for quota init */
2002
2003	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
2004	inode->i_size = i_size;
2005	inode->i_blocks = 0;
2006	inode->i_bytes = 0;
2007	REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
2008	    U32_MAX /* NO_BYTES_IN_DIRECT_ITEM */;
2009
2010	INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
2011	REISERFS_I(inode)->i_flags = 0;
2012	REISERFS_I(inode)->i_prealloc_block = 0;
2013	REISERFS_I(inode)->i_prealloc_count = 0;
2014	REISERFS_I(inode)->i_trans_id = 0;
2015	REISERFS_I(inode)->i_jl = NULL;
2016	REISERFS_I(inode)->i_attrs =
2017	    REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
2018	sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
2019	reiserfs_init_xattr_rwsem(inode);
2020
2021	/* key to search for correct place for new stat data */
2022	_make_cpu_key(&key, KEY_FORMAT_3_6, le32_to_cpu(ih.ih_key.k_dir_id),
2023		      le32_to_cpu(ih.ih_key.k_objectid), SD_OFFSET,
2024		      TYPE_STAT_DATA, 3 /*key length */ );
2025
2026	/* find proper place for inserting of stat data */
2027	retval = search_item(sb, &key, &path_to_key);
2028	if (retval == IO_ERROR) {
2029		err = -EIO;
2030		goto out_bad_inode;
2031	}
2032	if (retval == ITEM_FOUND) {
2033		pathrelse(&path_to_key);
2034		err = -EEXIST;
2035		goto out_bad_inode;
2036	}
2037	if (old_format_only(sb)) {
2038		/* i_uid or i_gid is too big to be stored in stat data v3.5 */
2039		if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
2040			pathrelse(&path_to_key);
2041			err = -EINVAL;
2042			goto out_bad_inode;
2043		}
2044		inode2sd_v1(&sd, inode, inode->i_size);
2045	} else {
2046		inode2sd(&sd, inode, inode->i_size);
2047	}
2048	/*
2049	 * store in in-core inode the key of stat data and version all
2050	 * object items will have (directory items will have old offset
2051	 * format, other new objects will consist of new items)
2052	 */
2053	if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
2054		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
2055	else
2056		set_inode_item_key_version(inode, KEY_FORMAT_3_6);
2057	if (old_format_only(sb))
2058		set_inode_sd_version(inode, STAT_DATA_V1);
2059	else
2060		set_inode_sd_version(inode, STAT_DATA_V2);
2061
2062	/* insert the stat data into the tree */
2063#ifdef DISPLACE_NEW_PACKING_LOCALITIES
2064	if (REISERFS_I(dir)->new_packing_locality)
2065		th->displace_new_blocks = 1;
2066#endif
2067	retval =
2068	    reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
2069				 (char *)(&sd));
2070	if (retval) {
2071		err = retval;
2072		reiserfs_check_path(&path_to_key);
2073		goto out_bad_inode;
2074	}
2075#ifdef DISPLACE_NEW_PACKING_LOCALITIES
2076	if (!th->displace_new_blocks)
2077		REISERFS_I(dir)->new_packing_locality = 0;
2078#endif
2079	if (S_ISDIR(mode)) {
2080		/* insert item with "." and ".." */
2081		retval =
2082		    reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
2083	}
2084
2085	if (S_ISLNK(mode)) {
2086		/* insert body of symlink */
2087		if (!old_format_only(sb))
2088			i_size = ROUND_UP(i_size);
2089		retval =
2090		    reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
2091					 i_size);
2092	}
2093	if (retval) {
2094		err = retval;
2095		reiserfs_check_path(&path_to_key);
2096		journal_end(th);
2097		goto out_inserted_sd;
2098	}
2099
2100	if (reiserfs_posixacl(inode->i_sb)) {
2101		reiserfs_write_unlock(inode->i_sb);
2102		retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
2103		reiserfs_write_lock(inode->i_sb);
2104		if (retval) {
2105			err = retval;
2106			reiserfs_check_path(&path_to_key);
2107			journal_end(th);
2108			goto out_inserted_sd;
2109		}
2110	} else if (inode->i_sb->s_flags & SB_POSIXACL) {
2111		reiserfs_warning(inode->i_sb, "jdm-13090",
2112				 "ACLs aren't enabled in the fs, "
2113				 "but vfs thinks they are!");
2114	} else if (IS_PRIVATE(dir))
2115		inode->i_flags |= S_PRIVATE;
2116
2117	if (security->name) {
2118		reiserfs_write_unlock(inode->i_sb);
2119		retval = reiserfs_security_write(th, inode, security);
2120		reiserfs_write_lock(inode->i_sb);
2121		if (retval) {
2122			err = retval;
2123			reiserfs_check_path(&path_to_key);
2124			retval = journal_end(th);
2125			if (retval)
2126				err = retval;
2127			goto out_inserted_sd;
2128		}
2129	}
2130
2131	reiserfs_update_sd(th, inode);
2132	reiserfs_check_path(&path_to_key);
2133
2134	return 0;
2135
2136out_bad_inode:
2137	/* Invalidate the object, nothing was inserted yet */
2138	INODE_PKEY(inode)->k_objectid = 0;
2139
2140	/* Quota change must be inside a transaction for journaling */
2141	depth = reiserfs_write_unlock_nested(inode->i_sb);
2142	dquot_free_inode(inode);
2143	reiserfs_write_lock_nested(inode->i_sb, depth);
2144
2145out_end_trans:
2146	journal_end(th);
2147	/*
2148	 * Drop can be outside and it needs more credits so it's better
2149	 * to have it outside
2150	 */
2151	depth = reiserfs_write_unlock_nested(inode->i_sb);
2152	dquot_drop(inode);
2153	reiserfs_write_lock_nested(inode->i_sb, depth);
2154	inode->i_flags |= S_NOQUOTA;
2155	make_bad_inode(inode);
2156
2157out_inserted_sd:
2158	clear_nlink(inode);
2159	th->t_trans_id = 0;	/* so the caller can't use this handle later */
2160	unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
2161	iput(inode);
2162	return err;
2163}
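/*
 * Usage sketch (hypothetical caller, not part of the build): on a
 * non-zero return the transaction has already been ended, the quota
 * reservation dropped and the inode iput, so the creator must not touch
 * either again:
 */
#if 0
	retval = reiserfs_new_inode(&th, dir, mode, NULL, 0 /* i_size */,
				    dentry, inode, &security);
	if (retval)
		return retval;	/* th->t_trans_id is 0, inode is gone */
#endif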
2164
2165/*
2166 * finds the tail page in the page cache,
2167 * reads the last block in.
2168 *
2169 * On success, page_result is set to a locked, pinned page, and bh_result
2170 * is set to an up to date buffer for the last block in the file.  returns 0.
2171 *
2172 * tail conversion is not done, so bh_result might not be valid for writing;
2173 * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
2174 * trying to write the block.
2175 *
2176 * on failure, nonzero is returned, page_result and bh_result are untouched.
2177 */
2178static int grab_tail_page(struct inode *inode,
2179			  struct page **page_result,
2180			  struct buffer_head **bh_result)
2181{
2182
2183	/*
2184	 * we want the page with the last byte in the file,
2185	 * not the page that will hold the next byte for appending
2186	 */
2187	unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
2188	unsigned long pos = 0;
2189	unsigned long start = 0;
2190	unsigned long blocksize = inode->i_sb->s_blocksize;
2191	unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
2192	struct buffer_head *bh;
2193	struct buffer_head *head;
2194	struct page *page;
2195	int error;
2196
2197	/*
2198	 * we know that we are only called with inode->i_size > 0.
2199	 * we also know that a file tail can never be as big as a block.
2200	 * If i_size % blocksize == 0, our file is currently block aligned
2201	 * and it won't need converting or zeroing after a truncate.
2202	 */
2203	if ((offset & (blocksize - 1)) == 0) {
2204		return -ENOENT;
2205	}
2206	page = grab_cache_page(inode->i_mapping, index);
2207	error = -ENOMEM;
2208	if (!page) {
2209		goto out;
2210	}
2211	/* start within the page of the last block in the file */
2212	start = (offset / blocksize) * blocksize;
2213
2214	error = __block_write_begin(page, start, offset - start,
2215				    reiserfs_get_block_create_0);
2216	if (error)
2217		goto unlock;
2218
2219	head = page_buffers(page);
2220	bh = head;
2221	do {
2222		if (pos >= start) {
2223			break;
2224		}
2225		bh = bh->b_this_page;
2226		pos += blocksize;
2227	} while (bh != head);
2228
2229	if (!buffer_uptodate(bh)) {
2230		/*
2231		 * note, this should never happen, prepare_write should be
2232		 * taking care of this for us.  If the buffer isn't up to
2233		 * date, I've screwed up the code to find the buffer, or the
2234		 * code to call prepare_write
2235		 */
2236		reiserfs_error(inode->i_sb, "clm-6000",
2237			       "error reading block %lu", bh->b_blocknr);
2238		error = -EIO;
2239		goto unlock;
2240	}
2241	*bh_result = bh;
2242	*page_result = page;
2243
2244out:
2245	return error;
2246
2247unlock:
2248	unlock_page(page);
2249	put_page(page);
2250	return error;
2251}
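/*
 * Usage sketch (not part of the build): per the contract above, a caller
 * that wants to dirty the returned buffer must first check that it maps
 * a real unformatted node, exactly as reiserfs_truncate_file() does below:
 */
#if 0
	if (!grab_tail_page(inode, &page, &bh)) {
		if (buffer_mapped(bh) && bh->b_blocknr != 0)
			mark_buffer_dirty(bh);	/* safe: not a direct item */
		unlock_page(page);
		put_page(page);
	}
#endif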
2252
2253/*
2254 * vfs version of truncate file.  Must NOT be called with
2255 * a transaction already started.
2256 *
2257 * some code taken from block_truncate_page
2258 */
2259int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2260{
2261	struct reiserfs_transaction_handle th;
2262	/* we want the offset for the first byte after the end of the file */
2263	unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
2264	unsigned blocksize = inode->i_sb->s_blocksize;
2265	unsigned length;
2266	struct page *page = NULL;
2267	int error;
2268	struct buffer_head *bh = NULL;
2269	int err2;
2270
2271	reiserfs_write_lock(inode->i_sb);
2272
2273	if (inode->i_size > 0) {
2274		error = grab_tail_page(inode, &page, &bh);
2275		if (error) {
2276			/*
2277			 * -ENOENT means we truncated past the end of the
2278			 * file, and get_block_create_0 could not find a
2279			 * block to read in, which is ok.
2280			 */
2281			if (error != -ENOENT)
2282				reiserfs_error(inode->i_sb, "clm-6001",
2283					       "grab_tail_page failed %d",
2284					       error);
2285			page = NULL;
2286			bh = NULL;
2287		}
2288	}
2289
2290	/*
2291	 * so, if page != NULL, we have a buffer head for the offset at
2292	 * the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
2293	 * then we have an unformatted node.  Otherwise, we have a direct item,
2294	 * and no zeroing is required on disk.  We zero after the truncate,
2295	 * because the truncate might pack the item anyway
2296	 * (it will unmap bh if it packs).
2297	 *
2298	 * it is enough to reserve space in the transaction for 2 balancings:
2299	 * one for adding the "save" link and another for the first
2300	 * cut_from_item, plus 1 for update_sd
2301	 */
2302	error = journal_begin(&th, inode->i_sb,
2303			      JOURNAL_PER_BALANCE_CNT * 2 + 1);
2304	if (error)
2305		goto out;
2306	reiserfs_update_inode_transaction(inode);
2307	if (update_timestamps)
2308		/*
2309		 * we are doing real truncate: if the system crashes
2310		 * before the last transaction of truncating gets committed
2311		 * - on reboot the file either appears truncated properly
2312		 * or not truncated at all
2313		 */
2314		add_save_link(&th, inode, 1);
2315	err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
2316	error = journal_end(&th);
2317	if (error)
2318		goto out;
2319
2320	/* check reiserfs_do_truncate after ending the transaction */
2321	if (err2) {
2322		error = err2;
2323		goto out;
2324	}
2325
2326	if (update_timestamps) {
2327		error = remove_save_link(inode, 1 /* truncate */);
2328		if (error)
2329			goto out;
2330	}
2331
2332	if (page) {
2333		length = offset & (blocksize - 1);
2334		/* if we are not on a block boundary */
2335		if (length) {
2336			length = blocksize - length;
2337			zero_user(page, offset, length);
2338			if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2339				mark_buffer_dirty(bh);
2340			}
2341		}
2342		unlock_page(page);
2343		put_page(page);
2344	}
2345
2346	reiserfs_write_unlock(inode->i_sb);
2347
2348	return 0;
2349out:
2350	if (page) {
2351		unlock_page(page);
2352		put_page(page);
2353	}
2354
2355	reiserfs_write_unlock(inode->i_sb);
2356
2357	return error;
2358}
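/*
 * Call-site sketch (not part of the build): no transaction may be
 * running on entry; a typical caller (cf. reiserfs_setattr below)
 * shrinks i_size first and then lets this function do the tree work:
 */
#if 0
	truncate_setsize(inode, new_size);	/* new_size is hypothetical */
	reiserfs_truncate_file(inode, 1 /* update timestamps */);
#endif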
2359
2360static int map_block_for_writepage(struct inode *inode,
2361				   struct buffer_head *bh_result,
2362				   unsigned long block)
2363{
2364	struct reiserfs_transaction_handle th;
2365	int fs_gen;
2366	struct item_head tmp_ih;
2367	struct item_head *ih;
2368	struct buffer_head *bh;
2369	__le32 *item;
2370	struct cpu_key key;
2371	INITIALIZE_PATH(path);
2372	int pos_in_item;
2373	int jbegin_count = JOURNAL_PER_BALANCE_CNT;
2374	loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
2375	int retval;
2376	int use_get_block = 0;
2377	int bytes_copied = 0;
2378	int copy_size;
2379	int trans_running = 0;
2380
2381	/*
2382	 * catch places below that try to log something without
2383	 * starting a trans
2384	 */
2385	th.t_trans_id = 0;
2386
2387	if (!buffer_uptodate(bh_result)) {
2388		return -EIO;
2389	}
2390
2391	kmap(bh_result->b_page);
2392start_over:
2393	reiserfs_write_lock(inode->i_sb);
2394	make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
2395
2396research:
2397	retval = search_for_position_by_key(inode->i_sb, &key, &path);
2398	if (retval != POSITION_FOUND) {
2399		use_get_block = 1;
2400		goto out;
2401	}
2402
2403	bh = get_last_bh(&path);
2404	ih = tp_item_head(&path);
2405	item = tp_item_body(&path);
2406	pos_in_item = path.pos_in_item;
2407
2408	/* we've found an unformatted node */
2409	if (indirect_item_found(retval, ih)) {
2410		if (bytes_copied > 0) {
2411			reiserfs_warning(inode->i_sb, "clm-6002",
2412					 "bytes_copied %d", bytes_copied);
2413		}
2414		if (!get_block_num(item, pos_in_item)) {
2415			/* crap, we are writing to a hole */
2416			use_get_block = 1;
2417			goto out;
2418		}
2419		set_block_dev_mapped(bh_result,
2420				     get_block_num(item, pos_in_item), inode);
2421	} else if (is_direct_le_ih(ih)) {
2422		char *p;
2423		p = page_address(bh_result->b_page);
2424		p += (byte_offset - 1) & (PAGE_SIZE - 1);
2425		copy_size = ih_item_len(ih) - pos_in_item;
2426
2427		fs_gen = get_generation(inode->i_sb);
2428		copy_item_head(&tmp_ih, ih);
2429
2430		if (!trans_running) {
2431			/* vs-3050 is gone, no need to drop the path */
2432			retval = journal_begin(&th, inode->i_sb, jbegin_count);
2433			if (retval)
2434				goto out;
2435			reiserfs_update_inode_transaction(inode);
2436			trans_running = 1;
2437			if (fs_changed(fs_gen, inode->i_sb)
2438			    && item_moved(&tmp_ih, &path)) {
2439				reiserfs_restore_prepared_buffer(inode->i_sb,
2440								 bh);
2441				goto research;
2442			}
2443		}
2444
2445		reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
2446
2447		if (fs_changed(fs_gen, inode->i_sb)
2448		    && item_moved(&tmp_ih, &path)) {
2449			reiserfs_restore_prepared_buffer(inode->i_sb, bh);
2450			goto research;
2451		}
2452
2453		memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
2454		       copy_size);
2455
2456		journal_mark_dirty(&th, bh);
2457		bytes_copied += copy_size;
2458		set_block_dev_mapped(bh_result, 0, inode);
2459
2460		/* are there still bytes left? */
2461		if (bytes_copied < bh_result->b_size &&
2462		    (byte_offset + bytes_copied) < inode->i_size) {
2463			set_cpu_key_k_offset(&key,
2464					     cpu_key_k_offset(&key) +
2465					     copy_size);
2466			goto research;
2467		}
2468	} else {
2469		reiserfs_warning(inode->i_sb, "clm-6003",
2470				 "bad item inode %lu", inode->i_ino);
2471		retval = -EIO;
2472		goto out;
2473	}
2474	retval = 0;
2475
2476out:
2477	pathrelse(&path);
2478	if (trans_running) {
2479		int err = journal_end(&th);
2480		if (err)
2481			retval = err;
2482		trans_running = 0;
2483	}
2484	reiserfs_write_unlock(inode->i_sb);
2485
2486	/* this is where we fill in holes in the file. */
2487	if (use_get_block) {
2488		retval = reiserfs_get_block(inode, block, bh_result,
2489					    GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
2490					    | GET_BLOCK_NO_DANGLE);
2491		if (!retval) {
2492			if (!buffer_mapped(bh_result)
2493			    || bh_result->b_blocknr == 0) {
2494				/* get_block failed to find a mapped unformatted node. */
2495				use_get_block = 0;
2496				goto start_over;
2497			}
2498		}
2499	}
2500	kunmap(bh_result->b_page);
2501
2502	if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
2503		/*
2504		 * we've copied data from the page into the direct item, so the
2505		 * buffer in the page is now clean, mark it to reflect that.
2506		 */
2507		lock_buffer(bh_result);
2508		clear_buffer_dirty(bh_result);
2509		unlock_buffer(bh_result);
2510	}
2511	return retval;
2512}
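/*
 * Aside (a sketch, not normative): reiserfs item keys address file bytes
 * starting at 1, which is why byte_offset above is computed as
 * (block << s_blocksize_bits) + 1 before being handed to make_cpu_key().
 */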
2513
2514/*
2515 * mason@suse.com: updated in 2.5.54 to follow the same general io
2516 * start/recovery path as __block_write_full_page, along with special
2517 * code to handle reiserfs tails.
2518 */
2519static int reiserfs_write_full_page(struct page *page,
2520				    struct writeback_control *wbc)
2521{
2522	struct inode *inode = page->mapping->host;
2523	unsigned long end_index = inode->i_size >> PAGE_SHIFT;
2524	int error = 0;
2525	unsigned long block;
2526	sector_t last_block;
2527	struct buffer_head *head, *bh;
2528	int partial = 0;
2529	int nr = 0;
2530	int checked = PageChecked(page);
2531	struct reiserfs_transaction_handle th;
2532	struct super_block *s = inode->i_sb;
2533	int bh_per_page = PAGE_SIZE / s->s_blocksize;
2534	th.t_trans_id = 0;
2535
2536	/* no logging allowed when nonblocking or from PF_MEMALLOC */
2537	if (checked && (current->flags & PF_MEMALLOC)) {
2538		redirty_page_for_writepage(wbc, page);
2539		unlock_page(page);
2540		return 0;
2541	}
2542
2543	/*
2544	 * The page dirty bit is cleared before writepage is called, which
2545	 * means we have to tell create_empty_buffers to make dirty buffers.
2546	 * The page really should be up to date at this point, so tossing
2547	 * in the BH_Uptodate is just a sanity check.
2548	 */
2549	if (!page_has_buffers(page)) {
2550		create_empty_buffers(page, s->s_blocksize,
2551				     (1 << BH_Dirty) | (1 << BH_Uptodate));
2552	}
2553	head = page_buffers(page);
2554
2555	/*
2556	 * last page in the file, zero out any contents past the
2557	 * last byte in the file
2558	 */
2559	if (page->index >= end_index) {
2560		unsigned last_offset;
2561
2562		last_offset = inode->i_size & (PAGE_SIZE - 1);
2563		/* no file contents in this page */
2564		if (page->index >= end_index + 1 || !last_offset) {
2565			unlock_page(page);
2566			return 0;
2567		}
2568		zero_user_segment(page, last_offset, PAGE_SIZE);
2569	}
2570	bh = head;
2571	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
2572	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2573	/* first map all the buffers, logging any direct items we find */
2574	do {
2575		if (block > last_block) {
2576			/*
2577			 * This can happen when the block size is less than
2578			 * the page size.  The corresponding bytes in the page
2579			 * were zero filled above
2580			 */
2581			clear_buffer_dirty(bh);
2582			set_buffer_uptodate(bh);
2583		} else if ((checked || buffer_dirty(bh)) &&
2584		           (!buffer_mapped(bh) || (buffer_mapped(bh)
2585						       && bh->b_blocknr ==
2586						       0))) {
2587			/*
2588			 * not mapped yet, or it points to a direct item, search
2589			 * the btree for the mapping info, and log any direct
2590			 * items found
2591			 */
2592			if ((error = map_block_for_writepage(inode, bh, block))) {
2593				goto fail;
2594			}
2595		}
2596		bh = bh->b_this_page;
2597		block++;
2598	} while (bh != head);
2599
2600	/*
2601	 * we start the transaction after map_block_for_writepage,
2602	 * because it can create holes in the file (an unbounded operation).
2603	 * starting it here, we can make a reliable estimate for how many
2604	 * blocks we're going to log
2605	 */
2606	if (checked) {
2607		ClearPageChecked(page);
2608		reiserfs_write_lock(s);
2609		error = journal_begin(&th, s, bh_per_page + 1);
2610		if (error) {
2611			reiserfs_write_unlock(s);
2612			goto fail;
2613		}
2614		reiserfs_update_inode_transaction(inode);
2615	}
2616	/* now go through and lock any dirty buffers on the page */
2617	do {
2618		get_bh(bh);
2619		if (!buffer_mapped(bh))
2620			continue;
2621		if (buffer_mapped(bh) && bh->b_blocknr == 0)
2622			continue;
2623
2624		if (checked) {
2625			reiserfs_prepare_for_journal(s, bh, 1);
2626			journal_mark_dirty(&th, bh);
2627			continue;
2628		}
2629		/*
2630		 * from this point on, we know the buffer is mapped to a
2631		 * real block and not a direct item
2632		 */
2633		if (wbc->sync_mode != WB_SYNC_NONE) {
2634			lock_buffer(bh);
2635		} else {
2636			if (!trylock_buffer(bh)) {
2637				redirty_page_for_writepage(wbc, page);
2638				continue;
2639			}
2640		}
2641		if (test_clear_buffer_dirty(bh)) {
2642			mark_buffer_async_write(bh);
2643		} else {
2644			unlock_buffer(bh);
2645		}
2646	} while ((bh = bh->b_this_page) != head);
2647
2648	if (checked) {
2649		error = journal_end(&th);
2650		reiserfs_write_unlock(s);
2651		if (error)
2652			goto fail;
2653	}
2654	BUG_ON(PageWriteback(page));
2655	set_page_writeback(page);
2656	unlock_page(page);
2657
2658	/*
2659	 * since any buffer might be the only dirty buffer on the page,
2660	 * the first submit_bh can bring the page out of writeback.
2661	 * be careful with the buffers.
2662	 */
2663	do {
2664		struct buffer_head *next = bh->b_this_page;
2665		if (buffer_async_write(bh)) {
2666			submit_bh(REQ_OP_WRITE, 0, bh);
2667			nr++;
2668		}
2669		put_bh(bh);
2670		bh = next;
2671	} while (bh != head);
2672
2673	error = 0;
2674done:
2675	if (nr == 0) {
2676		/*
2677		 * if this page only had a direct item, it is very possible for
2678		 * no io to be required without there being an error.  Or,
2679		 * someone else could have locked them and sent them down the
2680		 * pipe without locking the page
2681		 */
2682		bh = head;
2683		do {
2684			if (!buffer_uptodate(bh)) {
2685				partial = 1;
2686				break;
2687			}
2688			bh = bh->b_this_page;
2689		} while (bh != head);
2690		if (!partial)
2691			SetPageUptodate(page);
2692		end_page_writeback(page);
2693	}
2694	return error;
2695
2696fail:
2697	 * catches various errors; we need to make sure any valid dirty blocks
2698	 * catches various errors, we need to make sure any valid dirty blocks
2699	 * get to the media.  The page is currently locked and not marked for
2700	 * writeback
2701	 */
2702	ClearPageUptodate(page);
2703	bh = head;
2704	do {
2705		get_bh(bh);
2706		if (buffer_mapped(bh) && buffer_dirty(bh) && bh->b_blocknr) {
2707			lock_buffer(bh);
2708			mark_buffer_async_write(bh);
2709		} else {
2710			/*
2711			 * clear any dirty bits that might have come from
2712			 * getting attached to a dirty page
2713			 */
2714			clear_buffer_dirty(bh);
2715		}
2716		bh = bh->b_this_page;
2717	} while (bh != head);
2718	SetPageError(page);
2719	BUG_ON(PageWriteback(page));
2720	set_page_writeback(page);
2721	unlock_page(page);
2722	do {
2723		struct buffer_head *next = bh->b_this_page;
2724		if (buffer_async_write(bh)) {
2725			clear_buffer_dirty(bh);
2726			submit_bh(REQ_OP_WRITE, 0, bh);
2727			nr++;
2728		}
2729		put_bh(bh);
2730		bh = next;
2731	} while (bh != head);
2732	goto done;
2733}
2734
2735static int reiserfs_readpage(struct file *f, struct page *page)
2736{
2737	return block_read_full_page(page, reiserfs_get_block);
2738}
2739
2740static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
2741{
2742	struct inode *inode = page->mapping->host;
2743	reiserfs_wait_on_write_block(inode->i_sb);
2744	return reiserfs_write_full_page(page, wbc);
2745}
2746
2747static void reiserfs_truncate_failed_write(struct inode *inode)
2748{
2749	truncate_inode_pages(inode->i_mapping, inode->i_size);
2750	reiserfs_truncate_file(inode, 0);
2751}
2752
2753static int reiserfs_write_begin(struct file *file,
2754				struct address_space *mapping,
2755				loff_t pos, unsigned len, unsigned flags,
2756				struct page **pagep, void **fsdata)
2757{
2758	struct inode *inode;
2759	struct page *page;
2760	pgoff_t index;
2761	int ret;
2762	int old_ref = 0;
2763
2764	inode = mapping->host;
2765	*fsdata = NULL;
2766	if (flags & AOP_FLAG_CONT_EXPAND &&
2767	    (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
2768		pos++;
2769		*fsdata = (void *)(unsigned long)flags;
2770	}
2771
2772	index = pos >> PAGE_SHIFT;
2773	page = grab_cache_page_write_begin(mapping, index, flags);
2774	if (!page)
2775		return -ENOMEM;
2776	*pagep = page;
2777
2778	reiserfs_wait_on_write_block(inode->i_sb);
2779	fix_tail_page_for_writing(page);
2780	if (reiserfs_transaction_running(inode->i_sb)) {
2781		struct reiserfs_transaction_handle *th;
2782		th = (struct reiserfs_transaction_handle *)current->
2783		    journal_info;
2784		BUG_ON(!th->t_refcount);
2785		BUG_ON(!th->t_trans_id);
2786		old_ref = th->t_refcount;
2787		th->t_refcount++;
2788	}
2789	ret = __block_write_begin(page, pos, len, reiserfs_get_block);
2790	if (ret && reiserfs_transaction_running(inode->i_sb)) {
2791		struct reiserfs_transaction_handle *th = current->journal_info;
2792		/*
2793		 * this gets a little ugly.  If reiserfs_get_block returned an
2794		 * error and left a transaction running, we've got to close
2795		 * it, and we've got to free the handle if it was a persistent
2796		 * transaction.
2797		 *
2798		 * But, if we had nested into an existing transaction, we need
2799		 * to just drop the ref count on the handle.
2800		 *
2801		 * If old_ref == 0, the transaction is from reiserfs_get_block,
2802		 * and it was a persistent trans.  Otherwise, it was nested
2803		 * above.
2804		 */
2805		if (th->t_refcount > old_ref) {
2806			if (old_ref)
2807				th->t_refcount--;
2808			else {
2809				int err;
2810				reiserfs_write_lock(inode->i_sb);
2811				err = reiserfs_end_persistent_transaction(th);
2812				reiserfs_write_unlock(inode->i_sb);
2813				if (err)
2814					ret = err;
2815			}
2816		}
2817	}
2818	if (ret) {
2819		unlock_page(page);
2820		put_page(page);
2821		/* Truncate allocated blocks */
2822		reiserfs_truncate_failed_write(inode);
2823	}
2824	return ret;
2825}
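/*
 * Aside (not normative): the explicit t_refcount++ above is what keeps
 * a transaction that reiserfs_get_block() nested into alive across the
 * error path; only when old_ref == 0, i.e. the handle was created inside
 * reiserfs_get_block() itself, is the persistent transaction really ended.
 */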
2826
2827int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
2828{
2829	struct inode *inode = page->mapping->host;
2830	int ret;
2831	int old_ref = 0;
2832	int depth;
2833
2834	depth = reiserfs_write_unlock_nested(inode->i_sb);
2835	reiserfs_wait_on_write_block(inode->i_sb);
2836	reiserfs_write_lock_nested(inode->i_sb, depth);
2837
2838	fix_tail_page_for_writing(page);
2839	if (reiserfs_transaction_running(inode->i_sb)) {
2840		struct reiserfs_transaction_handle *th;
2841		th = (struct reiserfs_transaction_handle *)current->
2842		    journal_info;
2843		BUG_ON(!th->t_refcount);
2844		BUG_ON(!th->t_trans_id);
2845		old_ref = th->t_refcount;
2846		th->t_refcount++;
2847	}
2848
2849	ret = __block_write_begin(page, from, len, reiserfs_get_block);
2850	if (ret && reiserfs_transaction_running(inode->i_sb)) {
2851		struct reiserfs_transaction_handle *th = current->journal_info;
2852		/*
2853		 * this gets a little ugly.  If reiserfs_get_block returned an
2854		 * error and left a transaction running, we've got to close
2855		 * it, and we've got to free the handle if it was a persistent
2856		 * transaction.
2857		 *
2858		 * But, if we had nested into an existing transaction, we need
2859		 * to just drop the ref count on the handle.
2860		 *
2861		 * If old_ref == 0, the transaction is from reiserfs_get_block,
2862		 * and it was a persistent trans.  Otherwise, it was nested
2863		 * above.
2864		 */
2865		if (th->t_refcount > old_ref) {
2866			if (old_ref)
2867				th->t_refcount--;
2868			else {
2869				int err;
2870				reiserfs_write_lock(inode->i_sb);
2871				err = reiserfs_end_persistent_transaction(th);
2872				reiserfs_write_unlock(inode->i_sb);
2873				if (err)
2874					ret = err;
2875			}
2876		}
2877	}
2878	return ret;
2879
2880}
2881
2882static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
2883{
2884	return generic_block_bmap(as, block, reiserfs_bmap);
2885}
2886
2887static int reiserfs_write_end(struct file *file, struct address_space *mapping,
2888			      loff_t pos, unsigned len, unsigned copied,
2889			      struct page *page, void *fsdata)
2890{
2891	struct inode *inode = page->mapping->host;
2892	int ret = 0;
2893	int update_sd = 0;
2894	struct reiserfs_transaction_handle *th;
2895	unsigned start;
2896	bool locked = false;
2897
2898	if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
2899		pos++;
2900
2901	reiserfs_wait_on_write_block(inode->i_sb);
2902	if (reiserfs_transaction_running(inode->i_sb))
2903		th = current->journal_info;
2904	else
2905		th = NULL;
2906
2907	start = pos & (PAGE_SIZE - 1);
2908	if (unlikely(copied < len)) {
2909		if (!PageUptodate(page))
2910			copied = 0;
2911
2912		page_zero_new_buffers(page, start + copied, start + len);
2913	}
2914	flush_dcache_page(page);
2915
2916	reiserfs_commit_page(inode, page, start, start + copied);
2917
2918	/*
2919	 * generic_commit_write does this for us, but does not update the
2920	 * transaction tracking stuff when the size changes.  So, we have
2921	 * to do the i_size updates here.
2922	 */
2923	if (pos + copied > inode->i_size) {
2924		struct reiserfs_transaction_handle myth;
2925		reiserfs_write_lock(inode->i_sb);
2926		locked = true;
2927		/*
2928		 * If the file has grown beyond the boundary where it
2929		 * can have a tail, unmark it as needing tail
2930		 * packing
2931		 */
2932		if ((have_large_tails(inode->i_sb)
2933		     && inode->i_size > i_block_size(inode) * 4)
2934		    || (have_small_tails(inode->i_sb)
2935			&& inode->i_size > i_block_size(inode)))
2936			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2937
2938		ret = journal_begin(&myth, inode->i_sb, 1);
2939		if (ret)
2940			goto journal_error;
2941
2942		reiserfs_update_inode_transaction(inode);
2943		inode->i_size = pos + copied;
2944		/*
2945		 * this will just nest into our transaction.  It's important
2946		 * to use mark_inode_dirty so the inode gets pushed around on
2947		 * the dirty lists, and so that O_SYNC works as expected
2948		 */
2949		mark_inode_dirty(inode);
2950		reiserfs_update_sd(&myth, inode);
2951		update_sd = 1;
2952		ret = journal_end(&myth);
2953		if (ret)
2954			goto journal_error;
2955	}
2956	if (th) {
2957		if (!locked) {
2958			reiserfs_write_lock(inode->i_sb);
2959			locked = true;
2960		}
2961		if (!update_sd)
2962			mark_inode_dirty(inode);
2963		ret = reiserfs_end_persistent_transaction(th);
2964		if (ret)
2965			goto out;
2966	}
2967
2968out:
2969	if (locked)
2970		reiserfs_write_unlock(inode->i_sb);
2971	unlock_page(page);
2972	put_page(page);
2973
2974	if (pos + len > inode->i_size)
2975		reiserfs_truncate_failed_write(inode);
2976
2977	return ret == 0 ? copied : ret;
2978
2979journal_error:
2980	reiserfs_write_unlock(inode->i_sb);
2981	locked = false;
2982	if (th) {
2983		if (!update_sd)
2984			reiserfs_update_sd(th, inode);
2985		ret = reiserfs_end_persistent_transaction(th);
2986	}
2987	goto out;
2988}
2989
2990int reiserfs_commit_write(struct file *f, struct page *page,
2991			  unsigned from, unsigned to)
2992{
2993	struct inode *inode = page->mapping->host;
2994	loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
2995	int ret = 0;
2996	int update_sd = 0;
2997	struct reiserfs_transaction_handle *th = NULL;
2998	int depth;
2999
3000	depth = reiserfs_write_unlock_nested(inode->i_sb);
3001	reiserfs_wait_on_write_block(inode->i_sb);
3002	reiserfs_write_lock_nested(inode->i_sb, depth);
3003
3004	if (reiserfs_transaction_running(inode->i_sb)) {
3005		th = current->journal_info;
3006	}
3007	reiserfs_commit_page(inode, page, from, to);
3008
3009	/*
3010	 * generic_commit_write does this for us, but does not update the
3011	 * transaction tracking stuff when the size changes.  So, we have
3012	 * to do the i_size updates here.
3013	 */
3014	if (pos > inode->i_size) {
3015		struct reiserfs_transaction_handle myth;
3016		/*
3017		 * If the file has grown beyond the boundary where it
3018		 * can have a tail, unmark it as needing tail
3019		 * packing
3020		 */
3021		if ((have_large_tails(inode->i_sb)
3022		     && inode->i_size > i_block_size(inode) * 4)
3023		    || (have_small_tails(inode->i_sb)
3024			&& inode->i_size > i_block_size(inode)))
3025			REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
3026
3027		ret = journal_begin(&myth, inode->i_sb, 1);
3028		if (ret)
3029			goto journal_error;
3030
3031		reiserfs_update_inode_transaction(inode);
3032		inode->i_size = pos;
3033		/*
3034		 * this will just nest into our transaction.  It's important
3035		 * to use mark_inode_dirty so the inode gets pushed around
3036		 * on the dirty lists, and so that O_SYNC works as expected
3037		 */
3038		mark_inode_dirty(inode);
3039		reiserfs_update_sd(&myth, inode);
3040		update_sd = 1;
3041		ret = journal_end(&myth);
3042		if (ret)
3043			goto journal_error;
3044	}
3045	if (th) {
3046		if (!update_sd)
3047			mark_inode_dirty(inode);
3048		ret = reiserfs_end_persistent_transaction(th);
3049		if (ret)
3050			goto out;
3051	}
3052
3053out:
3054	return ret;
3055
3056journal_error:
3057	if (th) {
3058		if (!update_sd)
3059			reiserfs_update_sd(th, inode);
3060		ret = reiserfs_end_persistent_transaction(th);
3061	}
3062
3063	return ret;
3064}
3065
3066void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
3067{
3068	if (reiserfs_attrs(inode->i_sb)) {
3069		if (sd_attrs & REISERFS_SYNC_FL)
3070			inode->i_flags |= S_SYNC;
3071		else
3072			inode->i_flags &= ~S_SYNC;
3073		if (sd_attrs & REISERFS_IMMUTABLE_FL)
3074			inode->i_flags |= S_IMMUTABLE;
3075		else
3076			inode->i_flags &= ~S_IMMUTABLE;
3077		if (sd_attrs & REISERFS_APPEND_FL)
3078			inode->i_flags |= S_APPEND;
3079		else
3080			inode->i_flags &= ~S_APPEND;
3081		if (sd_attrs & REISERFS_NOATIME_FL)
3082			inode->i_flags |= S_NOATIME;
3083		else
3084			inode->i_flags &= ~S_NOATIME;
3085		if (sd_attrs & REISERFS_NOTAIL_FL)
3086			REISERFS_I(inode)->i_flags |= i_nopack_mask;
3087		else
3088			REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
3089	}
3090}
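/*
 * Sketch of the inverse mapping (illustrative only, not part of the
 * build): going from in-core inode flags back to on-disk sd_attrs would
 * mirror the branches above, e.g. for the sync bit:
 */
#if 0
static void example_i_attrs_to_sd_attrs(struct inode *inode, __u16 *sd_attrs)
{
	if (inode->i_flags & S_SYNC)
		*sd_attrs |= REISERFS_SYNC_FL;
	else
		*sd_attrs &= ~REISERFS_SYNC_FL;
	/* ...and likewise for the immutable/append/noatime/notail bits */
}
#endif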
3091
3092/*
3093 * decide if this buffer needs to stay around for data logging or ordered
3094 * write purposes
3095 */
3096static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
3097{
3098	int ret = 1;
3099	struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3100
3101	lock_buffer(bh);
3102	spin_lock(&j->j_dirty_buffers_lock);
3103	if (!buffer_mapped(bh)) {
3104		goto free_jh;
3105	}
3106	/*
3107	 * the page is locked, and the only places that log a data buffer
3108	 * also lock the page.
3109	 */
3110	if (reiserfs_file_data_log(inode)) {
3111		/*
3112		 * very conservative, leave the buffer pinned if
3113		 * anyone might need it.
3114		 */
3115		if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
3116			ret = 0;
3117		}
3118	} else  if (buffer_dirty(bh)) {
3119		struct reiserfs_journal_list *jl;
3120		struct reiserfs_jh *jh = bh->b_private;
3121
3122		/*
3123		 * why is this safe?
3124		 * reiserfs_setattr updates i_size in the on disk
3125		 * stat data before allowing vmtruncate to be called.
3126		 *
3127		 * If buffer was put onto the ordered list for this
3128		 * transaction, we know for sure either this transaction
3129		 * or an older one already has updated i_size on disk,
3130		 * and this ordered data won't be referenced in the file
3131		 * if we crash.
3132		 *
3133		 * if the buffer was put onto the ordered list for an older
3134		 * transaction, we need to leave it around
3135		 */
3136		if (jh && (jl = jh->jl)
3137		    && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
3138			ret = 0;
3139	}
3140free_jh:
3141	if (ret && bh->b_private) {
3142		reiserfs_free_jh(bh);
3143	}
3144	spin_unlock(&j->j_dirty_buffers_lock);
3145	unlock_buffer(bh);
3146	return ret;
3147}
3148
3149/* clm -- taken from fs/buffer.c:block_invalidate_page */
3150static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
3151				    unsigned int length)
3152{
3153	struct buffer_head *head, *bh, *next;
3154	struct inode *inode = page->mapping->host;
3155	unsigned int curr_off = 0;
3156	unsigned int stop = offset + length;
3157	int partial_page = (offset || length < PAGE_SIZE);
3158	int ret = 1;
3159
3160	BUG_ON(!PageLocked(page));
3161
3162	if (!partial_page)
3163		ClearPageChecked(page);
3164
3165	if (!page_has_buffers(page))
3166		goto out;
3167
3168	head = page_buffers(page);
3169	bh = head;
3170	do {
3171		unsigned int next_off = curr_off + bh->b_size;
3172		next = bh->b_this_page;
3173
3174		if (next_off > stop)
3175			goto out;
3176
3177		/*
3178		 * is this block fully invalidated?
3179		 */
3180		if (offset <= curr_off) {
3181			if (invalidatepage_can_drop(inode, bh))
3182				reiserfs_unmap_buffer(bh);
3183			else
3184				ret = 0;
3185		}
3186		curr_off = next_off;
3187		bh = next;
3188	} while (bh != head);
3189
3190	/*
3191	 * We release buffers only if the entire page is being invalidated.
3192	 * The get_block cached value has been unconditionally invalidated,
3193	 * so real IO is not possible anymore.
3194	 */
3195	if (!partial_page && ret) {
3196		ret = try_to_release_page(page, 0);
3197		/* maybe should BUG_ON(!ret); - neilb */
3198	}
3199out:
3200	return;
3201}
3202
3203static int reiserfs_set_page_dirty(struct page *page)
3204{
3205	struct inode *inode = page->mapping->host;
3206	if (reiserfs_file_data_log(inode)) {
3207		SetPageChecked(page);
3208		return __set_page_dirty_nobuffers(page);
3209	}
3210	return __set_page_dirty_buffers(page);
3211}
3212
3213/*
3214 * Returns 1 if the page's buffers were dropped.  The page is locked.
3215 *
3216 * Takes j_dirty_buffers_lock to protect the b_assoc_buffers list_heads
3217 * in the buffers at page_buffers(page).
3218 *
3219 * even in -o notail mode, we can't be sure an old mount without -o notail
3220 * didn't create files with tails.
3221 */
3222static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
3223{
3224	struct inode *inode = page->mapping->host;
3225	struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3226	struct buffer_head *head;
3227	struct buffer_head *bh;
3228	int ret = 1;
3229
3230	WARN_ON(PageChecked(page));
3231	spin_lock(&j->j_dirty_buffers_lock);
3232	head = page_buffers(page);
3233	bh = head;
3234	do {
3235		if (bh->b_private) {
3236			if (!buffer_dirty(bh) && !buffer_locked(bh)) {
3237				reiserfs_free_jh(bh);
3238			} else {
3239				ret = 0;
3240				break;
3241			}
3242		}
3243		bh = bh->b_this_page;
3244	} while (bh != head);
3245	if (ret)
3246		ret = try_to_free_buffers(page);
3247	spin_unlock(&j->j_dirty_buffers_lock);
3248	return ret;
3249}
3250
3251/*
3252 * We thank Mingming Cao for helping us understand in great detail what
3253 * to do in this section of the code.
3254 */
3255static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3256{
3257	struct file *file = iocb->ki_filp;
3258	struct inode *inode = file->f_mapping->host;
3259	size_t count = iov_iter_count(iter);
3260	ssize_t ret;
3261
3262	ret = blockdev_direct_IO(iocb, inode, iter,
3263				 reiserfs_get_blocks_direct_io);
3264
3265	/*
3266	 * In case of error, an extending write may have instantiated a few
3267	 * blocks outside i_size. Trim these off again.
3268	 */
3269	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
3270		loff_t isize = i_size_read(inode);
3271		loff_t end = iocb->ki_pos + count;
3272
3273		if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
3274			truncate_setsize(inode, isize);
3275			reiserfs_vfs_truncate_file(inode);
3276		}
3277	}
3278
3279	return ret;
3280}
3281
3282int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3283{
3284	struct inode *inode = d_inode(dentry);
3285	unsigned int ia_valid;
3286	int error;
3287
3288	error = setattr_prepare(dentry, attr);
3289	if (error)
3290		return error;
3291
3292	/* must be turned off for recursive notify_change calls */
3293	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
3294
3295	if (is_quota_modification(inode, attr)) {
3296		error = dquot_initialize(inode);
3297		if (error)
3298			return error;
3299	}
3300	reiserfs_write_lock(inode->i_sb);
3301	if (attr->ia_valid & ATTR_SIZE) {
3302		/*
3303		 * version 2 items will be caught by the s_maxbytes check
3304		 * done for us in vmtruncate
3305		 */
3306		if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
3307		    attr->ia_size > MAX_NON_LFS) {
3308			reiserfs_write_unlock(inode->i_sb);
3309			error = -EFBIG;
3310			goto out;
3311		}
3312
3313		inode_dio_wait(inode);
3314
3315		/* fill in hole pointers in the expanding truncate case. */
3316		if (attr->ia_size > inode->i_size) {
3317			error = generic_cont_expand_simple(inode, attr->ia_size);
3318			if (REISERFS_I(inode)->i_prealloc_count > 0) {
3319				int err;
3320				struct reiserfs_transaction_handle th;
3321				/* we're changing at most 2 bitmaps, inode + super */
3322				err = journal_begin(&th, inode->i_sb, 4);
3323				if (!err) {
3324					reiserfs_discard_prealloc(&th, inode);
3325					err = journal_end(&th);
3326				}
3327				if (err)
3328					error = err;
3329			}
3330			if (error) {
3331				reiserfs_write_unlock(inode->i_sb);
3332				goto out;
3333			}
3334			/*
3335			 * file size is changed, ctime and mtime are
3336			 * to be updated
3337			 */
3338			attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
3339		}
3340	}
3341	reiserfs_write_unlock(inode->i_sb);
3342
3343	if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
3344	     ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
3345	    (get_inode_sd_version(inode) == STAT_DATA_V1)) {
3346		/* stat data of format v3.5 has 16 bit uid and gid */
3347		error = -EINVAL;
3348		goto out;
3349	}
3350
3351	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3352	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3353		struct reiserfs_transaction_handle th;
3354		int jbegin_count =
3355		    2 *
3356		    (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
3357		     REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
3358		    2;
3359
3360		error = reiserfs_chown_xattrs(inode, attr);
3361
3362		if (error)
3363			return error;
3364
3365		/*
3366		 * (user+group)*(old+new) structure - we count quota
3367		 * info updates and the inode write (sb, inode)
3368		 */
3369		reiserfs_write_lock(inode->i_sb);
3370		error = journal_begin(&th, inode->i_sb, jbegin_count);
3371		reiserfs_write_unlock(inode->i_sb);
3372		if (error)
3373			goto out;
3374		error = dquot_transfer(inode, attr);
3375		reiserfs_write_lock(inode->i_sb);
3376		if (error) {
3377			journal_end(&th);
3378			reiserfs_write_unlock(inode->i_sb);
3379			goto out;
3380		}
3381
3382		/*
3383		 * Update corresponding info in inode so that everything
3384		 * is in one transaction
3385		 */
3386		if (attr->ia_valid & ATTR_UID)
3387			inode->i_uid = attr->ia_uid;
3388		if (attr->ia_valid & ATTR_GID)
3389			inode->i_gid = attr->ia_gid;
3390		mark_inode_dirty(inode);
3391		error = journal_end(&th);
3392		reiserfs_write_unlock(inode->i_sb);
3393		if (error)
3394			goto out;
3395	}
3396
3397	if ((attr->ia_valid & ATTR_SIZE) &&
3398	    attr->ia_size != i_size_read(inode)) {
3399		error = inode_newsize_ok(inode, attr->ia_size);
3400		if (!error) {
3401			/*
3402			 * Could race against reiserfs_file_release
3403			 * if called from NFS, so take tailpack mutex.
3404			 */
3405			mutex_lock(&REISERFS_I(inode)->tailpack);
3406			truncate_setsize(inode, attr->ia_size);
3407			reiserfs_truncate_file(inode, 1);
3408			mutex_unlock(&REISERFS_I(inode)->tailpack);
3409		}
3410	}
3411
3412	if (!error) {
3413		setattr_copy(inode, attr);
3414		mark_inode_dirty(inode);
3415	}
3416
3417	if (!error && reiserfs_posixacl(inode->i_sb)) {
3418		if (attr->ia_valid & ATTR_MODE)
3419			error = reiserfs_acl_chmod(inode);
3420	}
3421
3422out:
3423	return error;
3424}
3425
3426const struct address_space_operations reiserfs_address_space_operations = {
3427	.writepage = reiserfs_writepage,
3428	.readpage = reiserfs_readpage,
3429	.readpages = reiserfs_readpages,
3430	.releasepage = reiserfs_releasepage,
3431	.invalidatepage = reiserfs_invalidatepage,
3432	.write_begin = reiserfs_write_begin,
3433	.write_end = reiserfs_write_end,
3434	.bmap = reiserfs_aop_bmap,
3435	.direct_IO = reiserfs_direct_IO,
3436	.set_page_dirty = reiserfs_set_page_dirty,
3437};