/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			      struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
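
/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * read or rewrite the raw on-disk inode pair these helpers roughly like
 * this; "iloc" and ext4_raw_inode() are assumed from ext4_get_inode_loc():
 *
 *	struct ext4_inode *raw = ext4_raw_inode(&iloc);
 *
 *	if (!ext4_inode_csum_verify(inode, raw, EXT4_I(inode)))
 *		return -EIO;		(checksum mismatch on read)
 *	... modify raw inode fields ...
 *	ext4_inode_csum_set(inode, raw, EXT4_I(inode));	(refresh before write)
 */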

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
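
/*
 * Illustrative note (a sketch, not original code): for a fast symlink the
 * target string lives directly in the inode body, so the follow-link path
 * can avoid any data block I/O, roughly:
 *
 *	if (ext4_inode_is_fast_symlink(inode))
 *		link = (char *) EXT4_I(inode)->i_data;	(target in the inode)
 *	else
 *		... read the symlink block through the page cache ...
 */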

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
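
/*
 * Caller sketch (hedged; truncate-path callers follow roughly this
 * pattern): try to extend the current handle first and only restart the
 * transaction when extension is refused:
 *
 *	if (ext4_handle_has_enough_credits(handle, needed))
 *		return 0;
 *	if (!ext4_journal_extend(handle, needed))
 *		return 0;		(extension succeeded)
 *	return ext4_truncate_restart_trans(handle, inode, needed);
 */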

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus users could see stale data if they try to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
 308
 309#ifdef CONFIG_QUOTA
 310qsize_t *ext4_get_reserved_space(struct inode *inode)
 311{
 312	return &EXT4_I(inode)->i_reserved_quota;
 313}
 314#endif
 315
 316/*
 317 * Calculate the number of metadata blocks need to reserve
 318 * to allocate a block located at @lblock
 319 */
 320static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 321{
 322	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 323		return ext4_ext_calc_metadata_amount(inode, lblock);
 324
 325	return ext4_ind_calc_metadata_amount(inode, lblock);
 326}
 327
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				   struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks(); otherwise
 * it calls ext4_ind_map_blocks() to handle indirect-mapping based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped. If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * Allocating new blocks and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know to avoid double accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated blocks,
		 * set the BH_Da_Mapped bit on them. It's important to do this
		 * under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
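
/*
 * Usage sketch (illustrative only): a lookup-only mapping of a single
 * logical block, with no allocation, looks roughly like:
 *
 *	struct ext4_map_blocks map;
 *
 *	map.m_lblk = lblk;
 *	map.m_len = 1;
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		pblk = map.m_pblk;	(block already allocated on disk)
 *	else if (ret == 0)
 *		... hole, or a delayed/unwritten block: nothing mapped ...
 */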

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

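/*
 * Note (a sketch of how the callback is consumed; exact call sites vary
 * across the VFS): ext4_get_block() is the get_block_t callback that the
 * generic buffer-layer helpers are handed, e.g.:
 *
 *	err = __block_write_begin(page, pos, len, ext4_get_block);
 *	(read side)  return mpage_readpage(page, ext4_get_block);
 */
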
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
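
/*
 * Example (a hedged sketch; ext4's directory code does essentially this):
 * read logical block 0 of a directory without allocating anything:
 *
 *	int err = 0;
 *	struct buffer_head *bh = ext4_bread(NULL, dir, 0, 0, &err);
 *
 *	if (!bh)
 *		return err;	(err == 0 means a hole, < 0 an I/O error)
 */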

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied >  EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
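
/*
 * Worked example (illustrative): with delalloc, a 4k write at pos 8192
 * into an empty file moves i_size to 12288 immediately, while i_disksize
 * may lag until the delayed blocks are allocated at writeback time; the
 * i_disksize update above covers the case where blocks were allocated
 * here even though new_i_size is below the in-core i_size.
 */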

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}


	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve a single cluster located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks; the worst case is one
	 * extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}
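
/*
 * Caller sketch (hedged; the delalloc write path later in this file does
 * essentially this): reserve space when a written block turns out to be a
 * hole, unless its cluster is already allocated:
 *
 *	if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
 *		ret = ext4_da_reserve_space(inode, iblock);
 *		if (ret)
 *			goto out;	(-ENOSPC: nothing was reserved)
 *	}
 */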

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
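
/*
 * Worked example (illustrative): with 4k blocks and s_cluster_ratio = 16
 * (bigalloc), releasing 32 delayed blocks yields num_clusters =
 * EXT4_NUM_B2C(sbi, 32) = 2; each cluster's reservation is returned only
 * when ext4_find_delalloc_cluster() finds no other delayed block still
 * pointing into that cluster.
 */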

/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with the writepage() callback
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated. This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/*
				 * skip page if block allocation undone and
				 * block is dirty
				 */
				if (ext4_bh_delay_or_unwritten(NULL, bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(inode->i_sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 EXT4_I(inode)->i_reserved_data_blocks);
	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
	       EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}

/*
 * mpage_da_map_and_submit - go through the given space, map it
 *       if necessary, and then submit it for I/O
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 *
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state  & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in questions were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appears to be free blocks we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress. The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err) {
				/* Only if the journal is aborted */
				mpd->retval = err;
				goto submit_io;
			}
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * The function is used to collect contiguous blocks in the same state
 */
1690static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
1691				   sector_t logical, size_t b_size,
1692				   unsigned long b_state)
1693{
1694	sector_t next;
1695	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
1696
1697	/*
1698	 * XXX Don't go larger than mballoc is willing to allocate
1699	 * This is a stopgap solution.  We eventually need to fold
1700	 * mpage_da_submit_io() into this function and then call
1701	 * ext4_map_blocks() multiple times in a loop
1702	 */
1703	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1704		goto flush_it;
1705
1706	/* check if thereserved journal credits might overflow */
1707	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1708		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1709			/*
1710			 * With non-extent format we are limited by the journal
1711			 * credits available.  The total credits needed to insert
1712			 * nrblocks contiguous blocks depend on
1713			 * nrblocks.  So limit nrblocks.
1714			 */
1715			goto flush_it;
1716		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1717				EXT4_MAX_TRANS_DATA) {
1718			/*
1719			 * Adding the new buffer_head would make it cross the
1720			 * allowed limit for which we have journal credit
1721			 * reserved. So limit the new bh->b_size
1722			 */
1723			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1724						mpd->inode->i_blkbits;
1725			/* we will do mpage_da_submit_io in the next loop */
1726		}
1727	}
1728	/*
1729	 * First block in the extent
1730	 */
1731	if (mpd->b_size == 0) {
1732		mpd->b_blocknr = logical;
1733		mpd->b_size = b_size;
1734		mpd->b_state = b_state & BH_FLAGS;
1735		return;
1736	}
1737
1738	next = mpd->b_blocknr + nrblocks;
1739	/*
1740	 * Can we merge the block to our big extent?
1741	 */
1742	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
1743		mpd->b_size += b_size;
1744		return;
1745	}
1746
1747flush_it:
1748	/*
1749	 * We couldn't merge the block to our extent, so we
1750	 * need to flush the current extent and start a new one
1751	 */
1752	mpage_da_map_and_submit(mpd);
1753	return;
1754}
1755
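/*
 * Returns true for a dirty buffer that still needs a physical block:
 * either delayed-allocated or sitting in an unwritten extent.
 */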
1756static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1757{
1758	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1759}
1760
1761/*
1762 * This function grabs code from the very beginning of
1763 * ext4_map_blocks, but assumes that the caller is in the delayed write
1764 * path. This function looks up the requested blocks and sets the
1765 * buffer delay bit under the protection of i_data_sem.
1766 */
1767static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1768			      struct ext4_map_blocks *map,
1769			      struct buffer_head *bh)
1770{
1771	int retval;
1772	sector_t invalid_block = ~((sector_t) 0xffff);
1773
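	/*
	 * Delayed buffers carry a sentinel block number so that stray
	 * uses of b_blocknr are recognizable; pick one that cannot be a
	 * valid block number on this filesystem.
	 */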
1774	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1775		invalid_block = ~0;
1776
1777	map->m_flags = 0;
1778	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u, "
1779		  "logical block %lu\n", inode->i_ino, map->m_len,
1780		  (unsigned long) map->m_lblk);
1781	/*
1782	 * Try to see if we can get the block without requesting a new
1783	 * file system block.
1784	 */
1785	down_read((&EXT4_I(inode)->i_data_sem));
1786	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1787		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1788	else
1789		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1790
1791	if (retval == 0) {
1792		/*
1793		 * XXX: __block_prepare_write() unmaps passed block,
1794		 * is it OK?
1795		 */
1796		/* If the block was allocated from a previously allocated cluster,
1797		 * then we don't need to reserve it again. */
1798		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1799			retval = ext4_da_reserve_space(inode, iblock);
1800			if (retval)
1801				/* not enough space to reserve */
1802				goto out_unlock;
1803		}
1804
1805		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1806		 * and it should not appear on the bh->b_state.
1807		 */
1808		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1809
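		/*
		 * Mark the buffer delayed: space has been reserved, but a
		 * physical block will only be assigned at writeback time.
		 */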
1810		map_bh(bh, inode->i_sb, invalid_block);
1811		set_buffer_new(bh);
1812		set_buffer_delay(bh);
1813	}
1814
1815out_unlock:
1816	up_read((&EXT4_I(inode)->i_data_sem));
1817
1818	return retval;
1819}
1820
1821/*
1822 * This is a special get_blocks_t callback which is used by
1823 * ext4_da_write_begin().  It will either return mapped block or
1824 * reserve space for a single block.
1825 *
1826 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1827 * We also have b_blocknr = -1 and b_bdev initialized properly
1828 *
1829 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1830 * We also have b_blocknr = physical block mapping the unwritten extent and b_bdev
1831 * initialized properly.
1832 */
1833static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1834				  struct buffer_head *bh, int create)
1835{
1836	struct ext4_map_blocks map;
1837	int ret = 0;
1838
1839	BUG_ON(create == 0);
1840	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1841
1842	map.m_lblk = iblock;
1843	map.m_len = 1;
1844
1845	/*
1846	 * First, we need to know whether the block is allocated already;
1847	 * preallocated blocks are unmapped but should be treated
1848	 * the same as allocated blocks.
1849	 */
1850	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1851	if (ret <= 0)
1852		return ret;
1853
1854	map_bh(bh, inode->i_sb, map.m_pblk);
1855	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1856
1857	if (buffer_unwritten(bh)) {
1858		/* A delayed write to unwritten bh should be marked
1859		 * new and mapped.  Mapped ensures that we don't do
1860		 * get_block multiple times when we write to the same
1861		 * offset and new ensures that we do proper zero out
1862		 * for partial write.
1863		 */
1864		set_buffer_new(bh);
1865		set_buffer_mapped(bh);
1866	}
1867	return 0;
1868}
1869
1870/*
1871 * This function is used as a standard get_block_t callback function
1872 * when there is no desire to allocate any blocks.  It is used as a
1873 * callback function for block_write_begin() and block_write_full_page().
1874 * These functions should only try to map a single block at a time.
1875 *
1876 * Since this function doesn't do block allocations even if the caller
1877 * requests it by passing in create=1, it is critically important that
1878 * any caller checks to make sure that any buffer heads returned by
1879 * this function are either all already mapped or marked for
1880 * delayed allocation before calling block_write_full_page().  Otherwise,
1881 * b_blocknr could be left uninitialized, and the page write functions will
1882 * be taken by surprise.
1883 */
1884static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1885				   struct buffer_head *bh_result, int create)
1886{
1887	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1888	return _ext4_get_block(inode, iblock, bh_result, 0);
1889}
1890
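/*
 * bget_one()/bput_one() are walk_page_buffers() callbacks used by
 * __ext4_journalled_writepage() to pin a page's buffers across
 * unlock_page().
 */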
1891static int bget_one(handle_t *handle, struct buffer_head *bh)
1892{
1893	get_bh(bh);
1894	return 0;
1895}
1896
1897static int bput_one(handle_t *handle, struct buffer_head *bh)
1898{
1899	put_bh(bh);
1900	return 0;
1901}
1902
1903static int __ext4_journalled_writepage(struct page *page,
1904				       unsigned int len)
1905{
1906	struct address_space *mapping = page->mapping;
1907	struct inode *inode = mapping->host;
1908	struct buffer_head *page_bufs;
1909	handle_t *handle = NULL;
1910	int ret = 0;
1911	int err;
1912
1913	ClearPageChecked(page);
1914	page_bufs = page_buffers(page);
1915	BUG_ON(!page_bufs);
1916	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1917	/* As soon as we unlock the page, it can go away, but we have
1918	 * references to buffers so we are safe */
1919	unlock_page(page);
1920
1921	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1922	if (IS_ERR(handle)) {
1923		ret = PTR_ERR(handle);
1924		goto out;
1925	}
1926
1927	BUG_ON(!ext4_handle_valid(handle));
1928
1929	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1930				do_journal_get_write_access);
1931
1932	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1933				write_end_fn);
1934	if (ret == 0)
1935		ret = err;
1936	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1937	err = ext4_journal_stop(handle);
1938	if (!ret)
1939		ret = err;
1940
1941	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1942	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1943out:
1944	return ret;
1945}
1946
1947static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1948static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1949
1950/*
1951 * Note that we don't need to start a transaction unless we're journaling data
1952 * because we should have holes filled from ext4_page_mkwrite(). We don't even
1953 * need to add the inode to the transaction's list in ordered mode because if
1954 * we are writing back data added by write(), the inode is already there and if
1955 * we are writing back data modified via mmap(), no one guarantees in which
1956 * transaction the data will hit the disk. In case we are journaling data, we
1957 * cannot start transaction directly because transaction start ranks above page
1958 * lock so we have to do some magic.
1959 *
1960 * This function can get called via...
1961 *   - ext4_da_writepages after taking page lock (have journal handle)
1962 *   - journal_submit_inode_data_buffers (no journal handle)
1963 *   - shrink_page_list via pdflush (no journal handle)
1964 *   - grab_page_cache when doing write_begin (have journal handle)
1965 *
1966 * We don't do any block allocation in this function. If we have a page with
1967 * multiple blocks we need to write those buffer_heads that are mapped. This
1968 * is important for mmap-based writes. So if we do, with blocksize 1K,
1969 * truncate(f, 1024);
1970 * a = mmap(f, 0, 4096);
1971 * a[0] = 'a';
1972 * truncate(f, 4096);
1973 * the first buffer_head in the page is mapped via the page_mkwrite callback
1974 * but the other buffer_heads are unmapped yet dirty (dirtied via
1975 * do_wp_page). So writepage should write the first block. If we modify
1976 * the mmap area beyond 1024 we will again get a page_fault and the
1977 * page_mkwrite callback will do the block allocation and mark the
1978 * buffer_heads mapped.
1979 *
1980 * We redirty the page if we have any buffer_heads that are either delayed
1981 * or unwritten in the page.
1982 *
1983 * We can get recursively called as shown below.
1984 *
1985 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1986 *		ext4_writepage()
1987 *
1988 * But since we don't do any block allocation we should not deadlock.
1989 * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
1990 */
1991static int ext4_writepage(struct page *page,
1992			  struct writeback_control *wbc)
1993{
1994	int ret = 0, commit_write = 0;
1995	loff_t size;
1996	unsigned int len;
1997	struct buffer_head *page_bufs = NULL;
1998	struct inode *inode = page->mapping->host;
1999
2000	trace_ext4_writepage(page);
2001	size = i_size_read(inode);
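	/*
	 * On the last page, only the part below i_size is valid file
	 * data; nothing past it may be written out.
	 */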
2002	if (page->index == size >> PAGE_CACHE_SHIFT)
2003		len = size & ~PAGE_CACHE_MASK;
2004	else
2005		len = PAGE_CACHE_SIZE;
2006
2007	/*
2008	 * If the page does not have buffers (for whatever reason),
2009	 * try to create them using __block_write_begin.  If this
2010	 * fails, redirty the page and move on.
2011	 */
2012	if (!page_has_buffers(page)) {
2013		if (__block_write_begin(page, 0, len,
2014					noalloc_get_block_write)) {
2015		redirty_page:
2016			redirty_page_for_writepage(wbc, page);
2017			unlock_page(page);
2018			return 0;
2019		}
2020		commit_write = 1;
2021	}
2022	page_bufs = page_buffers(page);
2023	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2024			      ext4_bh_delay_or_unwritten)) {
2025		/*
2026		 * We don't want to do block allocation, so redirty
2027		 * the page and return.  We may reach here when we do
2028		 * a journal commit via journal_submit_inode_data_buffers.
2029		 * We can also reach here via shrink_page_list but it
2030		 * should never be for direct reclaim so warn if that
2031		 * happens
2032		 */
2033		WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2034								PF_MEMALLOC);
2035		goto redirty_page;
2036	}
2037	if (commit_write)
2038		/* now mark the buffer_heads as dirty and uptodate */
2039		block_commit_write(page, 0, len);
2040
2041	if (PageChecked(page) && ext4_should_journal_data(inode))
2042		/*
2043		 * It's mmapped pagecache.  Add buffers and journal it.  There
2044		 * doesn't seem much point in redirtying the page here.
2045		 */
2046		return __ext4_journalled_writepage(page, len);
2047
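	/*
	 * Buffers marked uninit come from dioread_nolock writes; attach
	 * an end_io handler so that the unwritten extent is converted to
	 * written only after the data I/O completes.
	 */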
2048	if (buffer_uninit(page_bufs)) {
2049		ext4_set_bh_endio(page_bufs, inode);
2050		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2051					    wbc, ext4_end_io_buffer_write);
2052	} else
2053		ret = block_write_full_page(page, noalloc_get_block_write,
2054					    wbc);
2055
2056	return ret;
2057}
2058
2059/*
2060 * This is called via ext4_da_writepages() to
2061 * calculate the total number of credits to reserve to fit
2062 * a single extent allocation into a single transaction;
2063 * ext4_da_writepages() will loop calling this before
2064 * the block allocation.
2065 */
2066
2067static int ext4_da_writepages_trans_blocks(struct inode *inode)
2068{
2069	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2070
2071	/*
2072	 * With non-extent format the journal credits needed to
2073	 * insert nrblocks contiguous blocks depend on the
2074	 * number of contiguous blocks. So we will limit the
2075	 * number of contiguous blocks to a sane value
2076	 */
2077	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2078	    (max_blocks > EXT4_MAX_TRANS_DATA))
2079		max_blocks = EXT4_MAX_TRANS_DATA;
2080
2081	return ext4_chunk_trans_blocks(inode, max_blocks);
2082}
2083
2084/*
2085 * write_cache_pages_da - walk the list of dirty pages of the given
2086 * address space, accumulate pages that need writing, and call
2087 * mpage_da_map_and_submit to map a single contiguous memory region
2088 * and then write them.
2089 */
2090static int write_cache_pages_da(struct address_space *mapping,
2091				struct writeback_control *wbc,
2092				struct mpage_da_data *mpd,
2093				pgoff_t *done_index)
2094{
2095	struct buffer_head	*bh, *head;
2096	struct inode		*inode = mapping->host;
2097	struct pagevec		pvec;
2098	unsigned int		nr_pages;
2099	sector_t		logical;
2100	pgoff_t			index, end;
2101	long			nr_to_write = wbc->nr_to_write;
2102	int			i, tag, ret = 0;
2103
2104	memset(mpd, 0, sizeof(struct mpage_da_data));
2105	mpd->wbc = wbc;
2106	mpd->inode = inode;
2107	pagevec_init(&pvec, 0);
2108	index = wbc->range_start >> PAGE_CACHE_SHIFT;
2109	end = wbc->range_end >> PAGE_CACHE_SHIFT;
2110
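	/*
	 * For data integrity syncs, only write pages tagged TOWRITE by
	 * tag_pages_for_writeback(); this avoids livelocking when pages
	 * are dirtied concurrently with the writeback walk.
	 */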
2111	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2112		tag = PAGECACHE_TAG_TOWRITE;
2113	else
2114		tag = PAGECACHE_TAG_DIRTY;
2115
2116	*done_index = index;
2117	while (index <= end) {
2118		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2119			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2120		if (nr_pages == 0)
2121			return 0;
2122
2123		for (i = 0; i < nr_pages; i++) {
2124			struct page *page = pvec.pages[i];
2125
2126			/*
2127			 * At this point, the page may be truncated or
2128			 * invalidated (changing page->mapping to NULL), or
2129			 * even swizzled back from swapper_space to tmpfs file
2130			 * mapping. However, page->index will not change
2131			 * because we have a reference on the page.
2132			 */
2133			if (page->index > end)
2134				goto out;
2135
2136			*done_index = page->index + 1;
2137
2138			/*
2139			 * If we can't merge this page, and we have
2140			 * accumulated a contiguous region, write it
2141			 */
2142			if ((mpd->next_page != page->index) &&
2143			    (mpd->next_page != mpd->first_page)) {
2144				mpage_da_map_and_submit(mpd);
2145				goto ret_extent_tail;
2146			}
2147
2148			lock_page(page);
2149
2150			/*
2151			 * If the page is no longer dirty, or its
2152			 * mapping no longer corresponds to the inode we
2153			 * are writing (which means it has been
2154			 * truncated or invalidated), or the page is
2155			 * already under writeback and we are not
2156			 * doing a data integrity writeback, skip the page
2157			 */
2158			if (!PageDirty(page) ||
2159			    (PageWriteback(page) &&
2160			     (wbc->sync_mode == WB_SYNC_NONE)) ||
2161			    unlikely(page->mapping != mapping)) {
2162				unlock_page(page);
2163				continue;
2164			}
2165
2166			wait_on_page_writeback(page);
2167			BUG_ON(PageWriteback(page));
2168
2169			if (mpd->next_page != page->index)
2170				mpd->first_page = page->index;
2171			mpd->next_page = page->index + 1;
2172			logical = (sector_t) page->index <<
2173				(PAGE_CACHE_SHIFT - inode->i_blkbits);
2174
2175			if (!page_has_buffers(page)) {
2176				mpage_add_bh_to_extent(mpd, logical,
2177						       PAGE_CACHE_SIZE,
2178						       (1 << BH_Dirty) | (1 << BH_Uptodate));
2179				if (mpd->io_done)
2180					goto ret_extent_tail;
2181			} else {
2182				/*
2183				 * Page with regular buffer heads,
2184				 * just add all dirty ones
2185				 */
2186				head = page_buffers(page);
2187				bh = head;
2188				do {
2189					BUG_ON(buffer_locked(bh));
2190					/*
2191					 * We need to try to allocate
2192					 * unmapped blocks in the same page.
2193					 * Otherwise we won't make progress
2194					 * with the page in ext4_writepage
2195					 */
2196					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2197						mpage_add_bh_to_extent(mpd, logical,
2198								       bh->b_size,
2199								       bh->b_state);
2200						if (mpd->io_done)
2201							goto ret_extent_tail;
2202					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2203						/*
2204						 * mapped dirty buffer. We need
2205						 * to update the b_state
2206						 * because we look at b_state
2207						 * in mpage_da_map_blocks.  We
2208						 * don't update b_size because
2209						 * if we find an unmapped
2210						 * buffer_head later we need to
2211						 * use the b_state flag of that
2212						 * buffer_head.
2213						 */
2214						if (mpd->b_size == 0)
2215							mpd->b_state = bh->b_state & BH_FLAGS;
2216					}
2217					logical++;
2218				} while ((bh = bh->b_this_page) != head);
2219			}
2220
2221			if (nr_to_write > 0) {
2222				nr_to_write--;
2223				if (nr_to_write == 0 &&
2224				    wbc->sync_mode == WB_SYNC_NONE)
2225					/*
2226					 * We stop writing back only if we are
2227					 * not doing integrity sync. In case of
2228					 * integrity sync we have to keep going
2229					 * because someone may be concurrently
2230					 * dirtying pages, and we might have
2231					 * synced a lot of newly appeared dirty
2232					 * pages, but have not synced all of the
2233					 * old dirty pages.
2234					 */
2235					goto out;
2236			}
2237		}
2238		pagevec_release(&pvec);
2239		cond_resched();
2240	}
2241	return 0;
2242ret_extent_tail:
2243	ret = MPAGE_DA_EXTENT_TAIL;
2244out:
2245	pagevec_release(&pvec);
2246	cond_resched();
2247	return ret;
2248}
2249
2250
2251static int ext4_da_writepages(struct address_space *mapping,
2252			      struct writeback_control *wbc)
2253{
2254	pgoff_t	index;
2255	int range_whole = 0;
2256	handle_t *handle = NULL;
2257	struct mpage_da_data mpd;
2258	struct inode *inode = mapping->host;
2259	int pages_written = 0;
2260	unsigned int max_pages;
2261	int range_cyclic, cycled = 1, io_done = 0;
2262	int needed_blocks, ret = 0;
2263	long desired_nr_to_write, nr_to_writebump = 0;
2264	loff_t range_start = wbc->range_start;
2265	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2266	pgoff_t done_index = 0;
2267	pgoff_t end;
2268	struct blk_plug plug;
2269
2270	trace_ext4_da_writepages(inode, wbc);
2271
2272	/*
2273	 * No pages to write? This is mainly a kludge to avoid starting
2274	 * a transaction for special inodes like the journal inode on last iput()
2275	 * because that could violate lock ordering on umount
2276	 */
2277	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2278		return 0;
2279
2280	/*
2281	 * If the filesystem has aborted, it is read-only, so return
2282	 * right away instead of dumping stack traces later on that
2283	 * will obscure the real source of the problem.  We test
2284	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2285	 * the latter could be true if the filesystem is mounted
2286	 * read-only, and in that case, ext4_da_writepages should
2287	 * *never* be called, so if that ever happens, we would want
2288	 * the stack trace.
2289	 */
2290	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2291		return -EROFS;
2292
2293	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2294		range_whole = 1;
2295
2296	range_cyclic = wbc->range_cyclic;
2297	if (wbc->range_cyclic) {
2298		index = mapping->writeback_index;
2299		if (index)
2300			cycled = 0;
2301		wbc->range_start = index << PAGE_CACHE_SHIFT;
2302		wbc->range_end  = LLONG_MAX;
2303		wbc->range_cyclic = 0;
2304		end = -1;
2305	} else {
2306		index = wbc->range_start >> PAGE_CACHE_SHIFT;
2307		end = wbc->range_end >> PAGE_CACHE_SHIFT;
2308	}
2309
2310	/*
2311	 * This works around two forms of stupidity.  The first is in
2312	 * the writeback code, which caps the maximum number of pages
2313	 * written to be 1024 pages.  This is wrong on multiple
2314	 * levels; different architectures have a different page size,
2315	 * which changes the maximum amount of data which gets
2316	 * written.  Secondly, 4 megabytes is way too small.  XFS
2317	 * forces this value to be 16 megabytes by multiplying
2318	 * nr_to_write parameter by four, and then relies on its
2319	 * allocator to allocate larger extents to make them
2320	 * contiguous.  Unfortunately this brings us to the second
2321	 * stupidity, which is that ext4's mballoc code only allocates
2322	 * at most 2048 blocks.  So we force contiguous writes up to
2323	 * the number of dirty blocks in the inode, or
2324	 * sbi->s_max_writeback_mb_bump, whichever is smaller.
2325	 */
2326	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2327	if (!range_cyclic && range_whole) {
2328		if (wbc->nr_to_write == LONG_MAX)
2329			desired_nr_to_write = wbc->nr_to_write;
2330		else
2331			desired_nr_to_write = wbc->nr_to_write * 8;
2332	} else
2333		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2334							   max_pages);
2335	if (desired_nr_to_write > max_pages)
2336		desired_nr_to_write = max_pages;
2337
2338	if (wbc->nr_to_write < desired_nr_to_write) {
2339		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2340		wbc->nr_to_write = desired_nr_to_write;
2341	}
2342
2343retry:
2344	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2345		tag_pages_for_writeback(mapping, index, end);
2346
2347	blk_start_plug(&plug);
2348	while (!ret && wbc->nr_to_write > 0) {
2349
2350		/*
2351		 * We insert one extent at a time, so we need the
2352		 * credits for a single extent allocation.
2353		 * Journalled mode is currently not supported
2354		 * by delalloc.
2355		 */
2356		BUG_ON(ext4_should_journal_data(inode));
2357		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2358
2359		/* start a new transaction*/
2360		handle = ext4_journal_start(inode, needed_blocks);
2361		if (IS_ERR(handle)) {
2362			ret = PTR_ERR(handle);
2363			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2364			       "%ld pages, ino %lu; err %d", __func__,
2365				wbc->nr_to_write, inode->i_ino, ret);
2366			blk_finish_plug(&plug);
2367			goto out_writepages;
2368		}
2369
2370		/*
2371		 * Now call write_cache_pages_da() to find the next
2372		 * contiguous region of logical blocks that need
2373		 * blocks to be allocated by ext4 and submit them.
2374		 */
2375		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2376		/*
2377		 * If we have a contiguous extent of pages and we
2378		 * haven't done the I/O yet, map the blocks and submit
2379		 * them for I/O.
2380		 */
2381		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2382			mpage_da_map_and_submit(&mpd);
2383			ret = MPAGE_DA_EXTENT_TAIL;
2384		}
2385		trace_ext4_da_write_pages(inode, &mpd);
2386		wbc->nr_to_write -= mpd.pages_written;
2387
2388		ext4_journal_stop(handle);
2389
2390		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2391			/* commit the transaction which would
2392			 * free blocks released in the transaction
2393			 * and try again
2394			 */
2395			jbd2_journal_force_commit_nested(sbi->s_journal);
2396			ret = 0;
2397		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2398			/*
2399			 * Got one extent; now try with the rest of the pages.
2400			 * If mpd.retval is set -EIO, journal is aborted.
2401			 * So we don't need to write any more.
2402			 */
2403			pages_written += mpd.pages_written;
2404			ret = mpd.retval;
2405			io_done = 1;
2406		} else if (wbc->nr_to_write)
2407			/*
2408			 * There is no more writeout needed,
2409			 * or we requested a non-blocking writeout
2410			 * and found the device congested
2411			 */
2412			break;
2413	}
2414	blk_finish_plug(&plug);
2415	if (!io_done && !cycled) {
2416		cycled = 1;
2417		index = 0;
2418		wbc->range_start = index << PAGE_CACHE_SHIFT;
2419		wbc->range_end  = mapping->writeback_index - 1;
2420		goto retry;
2421	}
2422
2423	/* Update index */
2424	wbc->range_cyclic = range_cyclic;
2425	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2426		/*
2427		 * set the writeback_index so that range_cyclic
2428		 * mode will write it back later
2429		 */
2430		mapping->writeback_index = done_index;
2431
2432out_writepages:
2433	wbc->nr_to_write -= nr_to_writebump;
2434	wbc->range_start = range_start;
2435	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2436	return ret;
2437}
2438
2439#define FALL_BACK_TO_NONDELALLOC 1
2440static int ext4_nonda_switch(struct super_block *sb)
2441{
2442	s64 free_blocks, dirty_blocks;
2443	struct ext4_sb_info *sbi = EXT4_SB(sb);
2444
2445	/*
2446	 * switch to non-delalloc mode if we are running low
2447	 * on free blocks. The free block accounting via percpu
2448	 * counters can get slightly wrong with percpu_counter_batch getting
2449	 * accumulated on each CPU without updating global counters.
2450	 * Delalloc needs accurate free block accounting, so switch
2451	 * to non-delalloc when we are near the error range.
2452	 */
2453	free_blocks  = EXT4_C2B(sbi,
2454		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2455	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2456	if (2 * free_blocks < 3 * dirty_blocks ||
2457		free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
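		/*
		 * Example: with 100 free and 70 dirty clusters,
		 * 2 * 100 < 3 * 70 holds, so we fall back to nondelalloc.
		 */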
2458		/*
2459		 * free block count is less than 150% of dirty blocks
2460		 * or free blocks is less than watermark
2461		 */
2462		return 1;
2463	}
2464	/*
2465	 * Even if we don't switch but are nearing capacity,
2466	 * start pushing delalloc when 1/2 of free blocks are dirty.
2467	 */
2468	if (free_blocks < 2 * dirty_blocks)
2469		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2470
2471	return 0;
2472}
2473
2474static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2475			       loff_t pos, unsigned len, unsigned flags,
2476			       struct page **pagep, void **fsdata)
2477{
2478	int ret, retries = 0;
2479	struct page *page;
2480	pgoff_t index;
2481	struct inode *inode = mapping->host;
2482	handle_t *handle;
2483
2484	index = pos >> PAGE_CACHE_SHIFT;
2485
2486	if (ext4_nonda_switch(inode->i_sb)) {
2487		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2488		return ext4_write_begin(file, mapping, pos,
2489					len, flags, pagep, fsdata);
2490	}
2491	*fsdata = (void *)0;
2492	trace_ext4_da_write_begin(inode, pos, len, flags);
2493retry:
2494	/*
2495	 * With delayed allocation, we don't log the i_disksize update
2496	 * if there is delayed block allocation. But we still need
2497	 * to journalling the i_disksize update if writes to the end
2498	 * to journal the i_disksize update if a write to the end of a
2499	 * file lands in an already mapped buffer.
2500	handle = ext4_journal_start(inode, 1);
2501	if (IS_ERR(handle)) {
2502		ret = PTR_ERR(handle);
2503		goto out;
2504	}
2505	/* We cannot recurse into the filesystem as the transaction is already
2506	 * started */
2507	flags |= AOP_FLAG_NOFS;
2508
2509	page = grab_cache_page_write_begin(mapping, index, flags);
2510	if (!page) {
2511		ext4_journal_stop(handle);
2512		ret = -ENOMEM;
2513		goto out;
2514	}
2515	*pagep = page;
2516
2517	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2518	if (ret < 0) {
2519		unlock_page(page);
2520		ext4_journal_stop(handle);
2521		page_cache_release(page);
2522		/*
2523		 * block_write_begin may have instantiated a few blocks
2524		 * outside i_size.  Trim these off again. Don't need
2525		 * i_size_read because we hold i_mutex.
2526		 */
2527		if (pos + len > inode->i_size)
2528			ext4_truncate_failed_write(inode);
2529	}
2530
2531	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2532		goto retry;
2533out:
2534	return ret;
2535}
2536
2537/*
2538 * Check if we should update i_disksize
2539 * when write to the end of file but not require block allocation
2540 */
2541static int ext4_da_should_update_i_disksize(struct page *page,
2542					    unsigned long offset)
2543{
2544	struct buffer_head *bh;
2545	struct inode *inode = page->mapping->host;
2546	unsigned int idx;
2547	int i;
2548
2549	bh = page_buffers(page);
2550	idx = offset >> inode->i_blkbits;
2551
2552	for (i = 0; i < idx; i++)
2553		bh = bh->b_this_page;
2554
2555	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2556		return 0;
2557	return 1;
2558}
2559
2560static int ext4_da_write_end(struct file *file,
2561			     struct address_space *mapping,
2562			     loff_t pos, unsigned len, unsigned copied,
2563			     struct page *page, void *fsdata)
2564{
2565	struct inode *inode = mapping->host;
2566	int ret = 0, ret2;
2567	handle_t *handle = ext4_journal_current_handle();
2568	loff_t new_i_size;
2569	unsigned long start, end;
2570	int write_mode = (int)(unsigned long)fsdata;
2571
2572	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2573		switch (ext4_inode_journal_mode(inode)) {
2574		case EXT4_INODE_ORDERED_DATA_MODE:
2575			return ext4_ordered_write_end(file, mapping, pos,
2576					len, copied, page, fsdata);
2577		case EXT4_INODE_WRITEBACK_DATA_MODE:
2578			return ext4_writeback_write_end(file, mapping, pos,
2579					len, copied, page, fsdata);
2580		default:
2581			BUG();
2582		}
2583	}
2584
2585	trace_ext4_da_write_end(inode, pos, len, copied);
2586	start = pos & (PAGE_CACHE_SIZE - 1);
2587	end = start + copied - 1;
2588
2589	/*
2590	 * generic_write_end() will run mark_inode_dirty() if i_size
2591	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
2592	 * into that.
2593	 */
2594
2595	new_i_size = pos + copied;
2596	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2597		if (ext4_da_should_update_i_disksize(page, end)) {
2598			down_write(&EXT4_I(inode)->i_data_sem);
2599			if (new_i_size > EXT4_I(inode)->i_disksize) {
2600				/*
2601				 * Updating i_disksize when extending file
2602				 * without needing block allocation
2603				 */
2604				if (ext4_should_order_data(inode))
2605					ret = ext4_jbd2_file_inode(handle,
2606								   inode);
2607
2608				EXT4_I(inode)->i_disksize = new_i_size;
2609			}
2610			up_write(&EXT4_I(inode)->i_data_sem);
2611			/* We need to mark the inode dirty even if
2612			 * new_i_size is less than inode->i_size
2613			 * but greater than i_disksize (hint: delalloc).
2614			 */
2615			ext4_mark_inode_dirty(handle, inode);
2616		}
2617	}
2618	ret2 = generic_write_end(file, mapping, pos, len, copied,
2619							page, fsdata);
2620	copied = ret2;
2621	if (ret2 < 0)
2622		ret = ret2;
2623	ret2 = ext4_journal_stop(handle);
2624	if (!ret)
2625		ret = ret2;
2626
2627	return ret ? ret : copied;
2628}
2629
2630static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2631{
2632	/*
2633	 * Drop reserved blocks
2634	 */
2635	BUG_ON(!PageLocked(page));
2636	if (!page_has_buffers(page))
2637		goto out;
2638
2639	ext4_da_page_release_reservation(page, offset);
2640
2641out:
2642	ext4_invalidatepage(page, offset);
2643
2644	return;
2645}
2646
2647/*
2648 * Force all delayed allocation blocks to be allocated for a given inode.
2649 */
2650int ext4_alloc_da_blocks(struct inode *inode)
2651{
2652	trace_ext4_alloc_da_blocks(inode);
2653
2654	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2655	    !EXT4_I(inode)->i_reserved_meta_blocks)
2656		return 0;
2657
2658	/*
2659	 * We do something simple for now.  The filemap_flush() will
2660	 * also start triggering a write of the data blocks, which is
2661	 * not strictly speaking necessary (and for users of
2662	 * laptop_mode, not even desirable).  However, to do otherwise
2663	 * would require replicating code paths in:
2664	 *
2665	 * ext4_da_writepages() ->
2666	 *    write_cache_pages() ---> (via passed in callback function)
2667	 *        __mpage_da_writepage() -->
2668	 *           mpage_add_bh_to_extent()
2669	 *           mpage_da_map_blocks()
2670	 *
2671	 * The problem is that write_cache_pages(), located in
2672	 * mm/page-writeback.c, marks pages clean in preparation for
2673	 * doing I/O, which is not desirable if we're not planning on
2674	 * doing I/O at all.
2675	 *
2676	 * We could call write_cache_pages(), and then redirty all of
2677	 * the pages by calling redirty_page_for_writepage() but that
2678	 * would be ugly in the extreme.  So instead we would need to
2679	 * replicate parts of the code in the above functions,
2680	 * simplifying them because we wouldn't actually intend to
2681	 * write out the pages, but rather only collect contiguous
2682	 * logical block extents, call the multi-block allocator, and
2683	 * then update the buffer heads with the block allocations.
2684	 *
2685	 * For now, though, we'll cheat by calling filemap_flush(),
2686	 * which will map the blocks, and start the I/O, but not
2687	 * actually wait for the I/O to complete.
2688	 */
2689	return filemap_flush(inode->i_mapping);
2690}
2691
2692/*
2693 * bmap() is special.  It gets used by applications such as lilo and by
2694 * the swapper to find the on-disk block of a specific piece of data.
2695 *
2696 * Naturally, this is dangerous if the block concerned is still in the
2697 * journal.  If somebody makes a swapfile on an ext4 data-journaling
2698 * filesystem and enables swap, then they may get a nasty shock when the
2699 * data getting swapped to that swapfile suddenly gets overwritten by
2700 * the original zeros written out previously to the journal and
2701 * awaiting writeback in the kernel's buffer cache.
2702 *
2703 * So, if we see any bmap calls here on a modified, data-journaled file,
2704 * take extra steps to flush any blocks which might be in the cache.
2705 */
2706static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2707{
2708	struct inode *inode = mapping->host;
2709	journal_t *journal;
2710	int err;
2711
2712	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2713			test_opt(inode->i_sb, DELALLOC)) {
2714		/*
2715		 * With delalloc we want to sync the file
2716		 * so that we can make sure we allocate
2717		 * blocks for the file
2718		 */
2719		filemap_write_and_wait(mapping);
2720	}
2721
2722	if (EXT4_JOURNAL(inode) &&
2723	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2724		/*
2725		 * This is a REALLY heavyweight approach, but the use of
2726		 * bmap on dirty files is expected to be extremely rare:
2727		 * only if we run lilo or swapon on a freshly made file
2728		 * do we expect this to happen.
2729		 *
2730		 * (bmap requires CAP_SYS_RAWIO so this does not
2731		 * represent an unprivileged user DOS attack --- we'd be
2732		 * in trouble if mortal users could trigger this path at
2733		 * will.)
2734		 *
2735		 * NB. EXT4_STATE_JDATA is not set on files other than
2736		 * regular files.  If somebody wants to bmap a directory
2737		 * or symlink and gets confused because the buffer
2738		 * hasn't yet been flushed to disk, they deserve
2739		 * everything they get.
2740		 */
2741
2742		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2743		journal = EXT4_JOURNAL(inode);
2744		jbd2_journal_lock_updates(journal);
2745		err = jbd2_journal_flush(journal);
2746		jbd2_journal_unlock_updates(journal);
2747
2748		if (err)
2749			return 0;
2750	}
2751
2752	return generic_block_bmap(mapping, block, ext4_get_block);
2753}
2754
2755static int ext4_readpage(struct file *file, struct page *page)
2756{
2757	trace_ext4_readpage(page);
2758	return mpage_readpage(page, ext4_get_block);
2759}
2760
2761static int
2762ext4_readpages(struct file *file, struct address_space *mapping,
2763		struct list_head *pages, unsigned nr_pages)
2764{
2765	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2766}
2767
2768static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2769{
2770	struct buffer_head *head, *bh;
2771	unsigned int curr_off = 0;
2772
2773	if (!page_has_buffers(page))
2774		return;
2775	head = bh = page_buffers(page);
2776	do {
2777		if (offset <= curr_off && test_clear_buffer_uninit(bh)
2778					&& bh->b_private) {
2779			ext4_free_io_end(bh->b_private);
2780			bh->b_private = NULL;
2781			bh->b_end_io = NULL;
2782		}
2783		curr_off = curr_off + bh->b_size;
2784		bh = bh->b_this_page;
2785	} while (bh != head);
2786}
2787
2788static void ext4_invalidatepage(struct page *page, unsigned long offset)
2789{
2790	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2791
2792	trace_ext4_invalidatepage(page, offset);
2793
2794	/*
2795	 * free any io_end structure allocated for buffers to be discarded
2796	 */
2797	if (ext4_should_dioread_nolock(page->mapping->host))
2798		ext4_invalidatepage_free_endio(page, offset);
2799	/*
2800	 * If it's a full truncate we just forget about the pending dirtying
2801	 */
2802	if (offset == 0)
2803		ClearPageChecked(page);
2804
2805	if (journal)
2806		jbd2_journal_invalidatepage(journal, page, offset);
2807	else
2808		block_invalidatepage(page, offset);
2809}
2810
2811static int ext4_releasepage(struct page *page, gfp_t wait)
2812{
2813	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2814
2815	trace_ext4_releasepage(page);
2816
2817	WARN_ON(PageChecked(page));
2818	if (!page_has_buffers(page))
2819		return 0;
2820	if (journal)
2821		return jbd2_journal_try_to_free_buffers(journal, page, wait);
2822	else
2823		return try_to_free_buffers(page);
2824}
2825
2826/*
2827 * ext4_get_block used when preparing for a DIO write or buffer write.
2828 * We allocate an uninitialized extent if blocks haven't been allocated.
2829 * The extent will be converted to initialized after the IO is complete.
2830 */
2831static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2832		   struct buffer_head *bh_result, int create)
2833{
2834	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2835		   inode->i_ino, create);
2836	return _ext4_get_block(inode, iblock, bh_result,
2837			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
2838}
2839
2840static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2841			    ssize_t size, void *private, int ret,
2842			    bool is_async)
2843{
2844	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2845	ext4_io_end_t *io_end = iocb->private;
2846	struct workqueue_struct *wq;
2847	unsigned long flags;
2848	struct ext4_inode_info *ei;
2849
2850	/* if not async direct IO or dio with a 0 byte write, just return */
2851	if (!io_end || !size)
2852		goto out;
2853
2854	ext_debug("ext4_end_io_dio(): io_end 0x%p "
2855		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
2856 		  iocb->private, io_end->inode->i_ino, iocb, offset,
2857		  size);
2858
2859	iocb->private = NULL;
2860
2861	/* if not aio dio with unwritten extents, just free io and return */
2862	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2863		ext4_free_io_end(io_end);
2864out:
2865		if (is_async)
2866			aio_complete(iocb, ret, 0);
2867		inode_dio_done(inode);
2868		return;
2869	}
2870
2871	io_end->offset = offset;
2872	io_end->size = size;
2873	if (is_async) {
2874		io_end->iocb = iocb;
2875		io_end->result = ret;
2876	}
2877	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2878
2879	/* Add the io_end to the per-inode completed aio dio list */
2880	ei = EXT4_I(io_end->inode);
2881	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2882	list_add_tail(&io_end->list, &ei->i_completed_io_list);
2883	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2884
2885	/* queue the work to convert unwritten extents to written */
2886	queue_work(wq, &io_end->work);
2887}
2888
2889static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2890{
2891	ext4_io_end_t *io_end = bh->b_private;
2892	struct workqueue_struct *wq;
2893	struct inode *inode;
2894	unsigned long flags;
2895
2896	if (!test_clear_buffer_uninit(bh) || !io_end)
2897		goto out;
2898
2899	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2900		ext4_msg(io_end->inode->i_sb, KERN_INFO,
2901			 "sb umounted, discard end_io request for inode %lu",
2902			 io_end->inode->i_ino);
2903		ext4_free_io_end(io_end);
2904		goto out;
2905	}
2906
2907	/*
2908	 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
2909	 * but being more careful is always safe for future changes.
2910	 */
2911	inode = io_end->inode;
2912	ext4_set_io_unwritten_flag(inode, io_end);
2913
2914	/* Add the io_end to the per-inode completed io list */
2915	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2916	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2917	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2918
2919	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2920	/* queue the work to convert unwritten extents to written */
2921	queue_work(wq, &io_end->work);
2922out:
2923	bh->b_private = NULL;
2924	bh->b_end_io = NULL;
2925	clear_buffer_uninit(bh);
2926	end_buffer_async_write(bh, uptodate);
2927}
2928
2929static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2930{
2931	ext4_io_end_t *io_end;
2932	struct page *page = bh->b_page;
2933	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2934	size_t size = bh->b_size;
2935
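	/*
	 * This runs in the writeback path, so rather than fail the write
	 * on a failed atomic allocation, yield the CPU and retry.
	 */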
2936retry:
2937	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2938	if (!io_end) {
2939		pr_warn_ratelimited("%s: allocation fail\n", __func__);
2940		schedule();
2941		goto retry;
2942	}
2943	io_end->offset = offset;
2944	io_end->size = size;
2945	/*
2946	 * We need to hold a reference to the page to make sure it
2947	 * doesn't get evicted before ext4_end_io_work() has a chance
2948	 * to convert the extent from unwritten to written.
2949	 */
2950	io_end->page = page;
2951	get_page(io_end->page);
2952
2953	bh->b_private = io_end;
2954	bh->b_end_io = ext4_end_io_buffer_write;
2955	return 0;
2956}
2957
2958/*
2959 * For ext4 extent files, ext4 will do direct-io writes to holes,
2960 * preallocated extents, and writes that extend the file, with no need to
2961 * fall back to buffered IO.
2962 *
2963 * For holes, we fallocate those blocks and mark them as uninitialized.
2964 * If those blocks were preallocated, we make sure they are split, but
2965 * still keep the range to write as uninitialized.
2966 *
2967 * The unwritten extents will be converted to written when DIO is completed.
2968 * For async direct IO, since the IO may still be pending on return, we
2969 * set up an end_io callback function, which will do the conversion
2970 * when the async direct IO completes.
2971 *
2972 * If the O_DIRECT write will extend the file then add this inode to the
2973 * orphan list.  So recovery will truncate it back to the original size
2974 * if the machine crashes during the write.
2975 *
2976 */
2977static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
2978			      const struct iovec *iov, loff_t offset,
2979			      unsigned long nr_segs)
2980{
2981	struct file *file = iocb->ki_filp;
2982	struct inode *inode = file->f_mapping->host;
2983	ssize_t ret;
2984	size_t count = iov_length(iov, nr_segs);
2985
2986	loff_t final_size = offset + count;
2987	if (rw == WRITE && final_size <= inode->i_size) {
2988		/*
2989		 * We could direct write to holes and fallocate.
2990		 *
2991		 * Blocks allocated to fill the hole are marked as uninitialized
2992		 * to prevent a parallel buffered read from exposing stale data
2993		 * before DIO completes the data IO.
2994		 *
2995		 * As for previously fallocated extents, ext4 get_block
2996		 * will simply mark the buffer mapped but still
2997		 * keep the extents uninitialized.
2998		 *
2999		 * For the non-AIO case, we convert those unwritten extents
3000		 * to written after returning from blockdev_direct_IO.
3001		 *
3002		 * For async DIO, the conversion needs to be deferred until
3003		 * the IO is completed. The ext4 end_io callback function
3004		 * will be called to take care of the conversion work.
3005		 * Here, for the async case, we allocate an io_end structure to
3006		 * hook onto the iocb.
3007		 */
3008		iocb->private = NULL;
3009		EXT4_I(inode)->cur_aio_dio = NULL;
3010		if (!is_sync_kiocb(iocb)) {
3011			ext4_io_end_t *io_end =
3012				ext4_init_io_end(inode, GFP_NOFS);
3013			if (!io_end)
3014				return -ENOMEM;
3015			io_end->flag |= EXT4_IO_END_DIRECT;
3016			iocb->private = io_end;
3017			/*
3018			 * we save the io structure for the current async
3019			 * direct IO, so that later ext4_map_blocks()
3020			 * can flag the io structure when there are
3021			 * unwritten extents that need to be converted
3022			 * when the IO is completed.
3023			 */
3024			EXT4_I(inode)->cur_aio_dio = iocb->private;
3025		}
3026
3027		ret = __blockdev_direct_IO(rw, iocb, inode,
3028					 inode->i_sb->s_bdev, iov,
3029					 offset, nr_segs,
3030					 ext4_get_block_write,
3031					 ext4_end_io_dio,
3032					 NULL,
3033					 DIO_LOCKING);
3034		if (iocb->private)
3035			EXT4_I(inode)->cur_aio_dio = NULL;
3036		/*
3037		 * The io_end structure takes a reference to the inode,
3038		 * so that structure needs to be destroyed and the
3039		 * reference to the inode dropped when IO is
3040		 * complete, even for a 0 byte write or a failure.
3041		 *
3042		 * In the successful AIO DIO case, the io_end structure will be
3043		 * destroyed and the reference to the inode will be dropped
3044		 * after the end_io callback function is called.
3045		 *
3046		 * In the case of a 0 byte write or an error, since the
3047		 * VFS direct IO won't invoke the end_io callback function,
3048		 * we need to free the end_io structure here.
3049		 */
3050		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3051			ext4_free_io_end(iocb->private);
3052			iocb->private = NULL;
3053		} else if (ret > 0 && ext4_test_inode_state(inode,
3054						EXT4_STATE_DIO_UNWRITTEN)) {
3055			int err;
3056			/*
3057			 * For the non-AIO case, since the IO is already
3058			 * complete, we can do the conversion right here.
3059			 */
3060			err = ext4_convert_unwritten_extents(inode,
3061							     offset, ret);
3062			if (err < 0)
3063				ret = err;
3064			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3065		}
3066		return ret;
3067	}
3068
3069	/* for writes past the end of file, we fall back to the old way */
3070	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3071}
3072
3073static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3074			      const struct iovec *iov, loff_t offset,
3075			      unsigned long nr_segs)
3076{
3077	struct file *file = iocb->ki_filp;
3078	struct inode *inode = file->f_mapping->host;
3079	ssize_t ret;
3080
3081	/*
3082	 * If we are doing data journalling we don't support O_DIRECT
3083	 */
3084	if (ext4_should_journal_data(inode))
3085		return 0;
3086
3087	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3088	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3089		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3090	else
3091		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3092	trace_ext4_direct_IO_exit(inode, offset,
3093				iov_length(iov, nr_segs), rw, ret);
3094	return ret;
3095}
3096
3097/*
3098 * Pages can be marked dirty completely asynchronously from ext4's journalling
3099 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3100 * much here because ->set_page_dirty is called under VFS locks.  The page is
3101 * not necessarily locked.
3102 *
3103 * We cannot just dirty the page and leave attached buffers clean, because the
3104 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3105 * or jbddirty because all the journalling code will explode.
3106 *
3107 * So what we do is to mark the page "pending dirty" and next time writepage
3108 * is called, propagate that into the buffers appropriately.
3109 */
3110static int ext4_journalled_set_page_dirty(struct page *page)
3111{
3112	SetPageChecked(page);
3113	return __set_page_dirty_nobuffers(page);
3114}
3115
3116static const struct address_space_operations ext4_ordered_aops = {
3117	.readpage		= ext4_readpage,
3118	.readpages		= ext4_readpages,
3119	.writepage		= ext4_writepage,
3120	.write_begin		= ext4_write_begin,
3121	.write_end		= ext4_ordered_write_end,
3122	.bmap			= ext4_bmap,
3123	.invalidatepage		= ext4_invalidatepage,
3124	.releasepage		= ext4_releasepage,
3125	.direct_IO		= ext4_direct_IO,
3126	.migratepage		= buffer_migrate_page,
3127	.is_partially_uptodate  = block_is_partially_uptodate,
3128	.error_remove_page	= generic_error_remove_page,
3129};
3130
3131static const struct address_space_operations ext4_writeback_aops = {
3132	.readpage		= ext4_readpage,
3133	.readpages		= ext4_readpages,
3134	.writepage		= ext4_writepage,
3135	.write_begin		= ext4_write_begin,
3136	.write_end		= ext4_writeback_write_end,
3137	.bmap			= ext4_bmap,
3138	.invalidatepage		= ext4_invalidatepage,
3139	.releasepage		= ext4_releasepage,
3140	.direct_IO		= ext4_direct_IO,
3141	.migratepage		= buffer_migrate_page,
3142	.is_partially_uptodate  = block_is_partially_uptodate,
3143	.error_remove_page	= generic_error_remove_page,
3144};
3145
3146static const struct address_space_operations ext4_journalled_aops = {
3147	.readpage		= ext4_readpage,
3148	.readpages		= ext4_readpages,
3149	.writepage		= ext4_writepage,
3150	.write_begin		= ext4_write_begin,
3151	.write_end		= ext4_journalled_write_end,
3152	.set_page_dirty		= ext4_journalled_set_page_dirty,
3153	.bmap			= ext4_bmap,
3154	.invalidatepage		= ext4_invalidatepage,
3155	.releasepage		= ext4_releasepage,
3156	.direct_IO		= ext4_direct_IO,
3157	.is_partially_uptodate  = block_is_partially_uptodate,
3158	.error_remove_page	= generic_error_remove_page,
3159};
3160
3161static const struct address_space_operations ext4_da_aops = {
3162	.readpage		= ext4_readpage,
3163	.readpages		= ext4_readpages,
3164	.writepage		= ext4_writepage,
3165	.writepages		= ext4_da_writepages,
3166	.write_begin		= ext4_da_write_begin,
3167	.write_end		= ext4_da_write_end,
3168	.bmap			= ext4_bmap,
3169	.invalidatepage		= ext4_da_invalidatepage,
3170	.releasepage		= ext4_releasepage,
3171	.direct_IO		= ext4_direct_IO,
3172	.migratepage		= buffer_migrate_page,
3173	.is_partially_uptodate  = block_is_partially_uptodate,
3174	.error_remove_page	= generic_error_remove_page,
3175};
3176
3177void ext4_set_aops(struct inode *inode)
3178{
3179	switch (ext4_inode_journal_mode(inode)) {
3180	case EXT4_INODE_ORDERED_DATA_MODE:
3181		if (test_opt(inode->i_sb, DELALLOC))
3182			inode->i_mapping->a_ops = &ext4_da_aops;
3183		else
3184			inode->i_mapping->a_ops = &ext4_ordered_aops;
3185		break;
3186	case EXT4_INODE_WRITEBACK_DATA_MODE:
3187		if (test_opt(inode->i_sb, DELALLOC))
3188			inode->i_mapping->a_ops = &ext4_da_aops;
3189		else
3190			inode->i_mapping->a_ops = &ext4_writeback_aops;
3191		break;
3192	case EXT4_INODE_JOURNAL_DATA_MODE:
3193		inode->i_mapping->a_ops = &ext4_journalled_aops;
3194		break;
3195	default:
3196		BUG();
3197	}
3198}
3199
3200
3201/*
3202 * ext4_discard_partial_page_buffers()
3203 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3204 * This function finds and locks the page containing the offset
3205 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3206 * Calling functions that already have the page locked should call
3207 * ext4_discard_partial_page_buffers_no_lock directly.
3208 */
3209int ext4_discard_partial_page_buffers(handle_t *handle,
3210		struct address_space *mapping, loff_t from,
3211		loff_t length, int flags)
3212{
3213	struct inode *inode = mapping->host;
3214	struct page *page;
3215	int err = 0;
3216
3217	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3218				   mapping_gfp_mask(mapping) & ~__GFP_FS);
3219	if (!page)
3220		return -ENOMEM;
3221
3222	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3223		from, length, flags);
3224
3225	unlock_page(page);
3226	page_cache_release(page);
3227	return err;
3228}
3229
3230/*
3231 * ext4_discard_partial_page_buffers_no_lock()
3232 * Zeros a page range of length 'length' starting from offset 'from'.
3233 * Buffer heads that correspond to the block aligned regions of the
3234 * zeroed range will be unmapped.  Non-block-aligned regions
3235 * will have the corresponding buffer head mapped if needed so that
3236 * that region of the page can be updated with the partial zero out.
3237 *
3238 * This function assumes that the page has already been locked.
3239 * The range to be discarded must be contained within the given page.
3240 * If the specified range exceeds the end of the page it will be shortened
3241 * to the end of the page that corresponds to 'from'.  This function is
3242 * appropriate for updating a page and its buffer heads to be unmapped and
3243 * zeroed for blocks that have been either released, or are going to be
3244 * released.
3245 *
3246 * handle: The journal handle
3247 * inode:  The file's inode
3248 * page:   A locked page that contains the offset "from"
3249 * from:   The starting byte offset (from the beginning of the file)
3250 *         to begin discarding
3251 * len:    The length of bytes to discard
3252 * flags:  Optional flags that may be used:
3253 *
3254 *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
3255 *         Only zero the regions of the page whose buffer heads
3256 *         have already been unmapped.  This flag is appropriate
3257 *         for updating the contents of a page whose blocks may
3258 *         have already been released, and we only want to zero
3259 *         out the regions that correspond to those released blocks.
3260 *
3261 * Returns zero on success or negative on failure.
3262 */
3263static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3264		struct inode *inode, struct page *page, loff_t from,
3265		loff_t length, int flags)
3266{
3267	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3268	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3269	unsigned int blocksize, max, pos;
3270	ext4_lblk_t iblock;
3271	struct buffer_head *bh;
3272	int err = 0;
3273
3274	blocksize = inode->i_sb->s_blocksize;
3275	max = PAGE_CACHE_SIZE - offset;
3276
3277	if (index != page->index)
3278		return -EINVAL;
3279
3280	/*
3281	 * correct length if it does not fall between
3282	 * 'from' and the end of the page
3283	 */
3284	if (length > max || length < 0)
3285		length = max;
3286
3287	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3288
3289	if (!page_has_buffers(page))
3290		create_empty_buffers(page, blocksize, 0);
3291
3292	/* Find the buffer that contains "offset" */
3293	bh = page_buffers(page);
3294	pos = blocksize;
3295	while (offset >= pos) {
3296		bh = bh->b_this_page;
3297		iblock++;
3298		pos += blocksize;
3299	}
3300
3301	pos = offset;
3302	while (pos < offset + length) {
3303		unsigned int end_of_block, range_to_discard;
3304
3305		err = 0;
3306
3307		/* The length of space left to zero and unmap */
3308		range_to_discard = offset + length - pos;
3309
3310		/* The length of space until the end of the block */
3311		end_of_block = blocksize - (pos & (blocksize-1));
3312
3313		/*
3314		 * Do not unmap or zero past end of block
3315		 * for this buffer head
3316		 */
3317		if (range_to_discard > end_of_block)
3318			range_to_discard = end_of_block;
3319
3320
3321		/*
3322		 * Skip this buffer head if we are only zeroing unmapped
3323		 * regions of the page
3324		 */
3325		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3326			buffer_mapped(bh))
3327				goto next;
3328
3329		/* If the range is block aligned, unmap */
3330		if (range_to_discard == blocksize) {
3331			clear_buffer_dirty(bh);
3332			bh->b_bdev = NULL;
3333			clear_buffer_mapped(bh);
3334			clear_buffer_req(bh);
3335			clear_buffer_new(bh);
3336			clear_buffer_delay(bh);
3337			clear_buffer_unwritten(bh);
3338			clear_buffer_uptodate(bh);
3339			zero_user(page, pos, range_to_discard);
3340			BUFFER_TRACE(bh, "Buffer discarded");
3341			goto next;
3342		}
3343
3344		/*
3345		 * If this block is not completely contained in the range
3346		 * to be discarded, then it is not going to be released. Because
3347		 * we need to keep this block, we need to make sure this part
3348		 * of the page is uptodate before we modify it by writing
3349		 * partial zeros on it.
3350		 */
3351		if (!buffer_mapped(bh)) {
3352			/*
3353			 * Buffer head must be mapped before we can read
3354			 * from the block
3355			 */
3356			BUFFER_TRACE(bh, "unmapped");
3357			ext4_get_block(inode, iblock, bh, 0);
3358			/* unmapped? It's a hole - nothing to do */
3359			if (!buffer_mapped(bh)) {
3360				BUFFER_TRACE(bh, "still unmapped");
3361				goto next;
3362			}
3363		}
3364
3365		/* Ok, it's mapped. Make sure it's up-to-date */
3366		if (PageUptodate(page))
3367			set_buffer_uptodate(bh);
3368
3369		if (!buffer_uptodate(bh)) {
3370			err = -EIO;
3371			ll_rw_block(READ, 1, &bh);
3372			wait_on_buffer(bh);
3373			/* Uhhuh. Read error. Complain and punt.*/
3374			if (!buffer_uptodate(bh))
3375				goto next;
3376		}
3377
3378		if (ext4_should_journal_data(inode)) {
3379			BUFFER_TRACE(bh, "get write access");
3380			err = ext4_journal_get_write_access(handle, bh);
3381			if (err)
3382				goto next;
3383		}
3384
3385		zero_user(page, pos, range_to_discard);
3386
3387		err = 0;
3388		if (ext4_should_journal_data(inode)) {
3389			err = ext4_handle_dirty_metadata(handle, inode, bh);
3390		} else
3391			mark_buffer_dirty(bh);
3392
3393		BUFFER_TRACE(bh, "Partial buffer zeroed");
3394next:
3395		bh = bh->b_this_page;
3396		iblock++;
3397		pos += range_to_discard;
3398	}
3399
3400	return err;
3401}
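/*
 * Illustrative sketch (not from the kernel sources): a hypothetical
 * caller of ext4_discard_partial_page_buffers().  The offsets and the
 * function name below are made up; only the flag comes from the code
 * above.
 */
#if 0	/* example only */
static void example_discard(handle_t *handle, struct inode *inode)
{
	loff_t from = 5000;	/* assumed: hole edge inside a page */
	loff_t page_end = 8192;	/* end of the page containing 'from' */

	/*
	 * Zero only the already-unmapped buffers between 'from' and the
	 * end of its page; mapped buffers keep their contents because
	 * their blocks have not been released.
	 */
	ext4_discard_partial_page_buffers(handle, inode->i_mapping, from,
			page_end - from,
			EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
}
#endif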
3402
3403int ext4_can_truncate(struct inode *inode)
3404{
3405	if (S_ISREG(inode->i_mode))
3406		return 1;
3407	if (S_ISDIR(inode->i_mode))
3408		return 1;
3409	if (S_ISLNK(inode->i_mode))
3410		return !ext4_inode_is_fast_symlink(inode);
3411	return 0;
3412}
3413
3414/*
3415 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3416 * associated with the given offset and length
3417 *
3418 * @inode:  File inode
3419 * @offset: The offset where the hole will begin
3420 * @len:    The length of the hole
3421 *
3422 * Returns: 0 on success or negative on failure
3423 */
3424
3425int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3426{
3427	struct inode *inode = file->f_path.dentry->d_inode;
3428	if (!S_ISREG(inode->i_mode))
3429		return -EOPNOTSUPP;
3430
3431	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3432		/* TODO: Add support for non extent hole punching */
3433		return -EOPNOTSUPP;
3434	}
3435
3436	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3437		/* TODO: Add support for bigalloc file systems */
3438		return -EOPNOTSUPP;
3439	}
3440
3441	return ext4_ext_punch_hole(file, offset, length);
3442}
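/*
 * Illustrative sketch (not from the kernel sources): ext4_punch_hole()
 * is reached from userspace through fallocate(2) with
 * FALLOC_FL_PUNCH_HOLE, which must be combined with
 * FALLOC_FL_KEEP_SIZE.  The path below is made up.
 */
#if 0	/* example only, userspace; build with -D_GNU_SOURCE */
#include <fcntl.h>
#include <linux/falloc.h>

int punch_example(void)
{
	int fd = open("/mnt/ext4/example", O_RDWR);	/* assumed path */

	if (fd < 0)
		return -1;
	/* release one megabyte of blocks starting at offset 4096 */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 4096, 1024 * 1024);
}
#endif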
3443
3444/*
3445 * ext4_truncate()
3446 *
3447 * We block out ext4_get_block() block instantiations across the entire
3448 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3449 * simultaneously on behalf of the same inode.
3450 *
3451 * As we work through the truncate and commit bits of it to the journal there
3452 * is one core, guiding principle: the file's tree must always be consistent on
3453 * disk.  We must be able to restart the truncate after a crash.
3454 *
3455 * The file's tree may be transiently inconsistent in memory (although it
3456 * probably isn't), but whenever we close off and commit a journal transaction,
3457 * the contents of (the filesystem + the journal) must be consistent and
3458 * restartable.  It's pretty simple, really: bottom up, right to left (although
3459 * left-to-right works OK too).
3460 *
3461 * Note that at recovery time, journal replay occurs *before* the restart of
3462 * truncate against the orphan inode list.
3463 *
3464 * The committed inode has the new, desired i_size (which is the same as
3465 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3466 * that this inode's truncate did not complete and it will again call
3467 * ext4_truncate() to have another go.  So there will be instantiated blocks
3468 * to the right of the truncation point in a crashed ext4 filesystem.  But
3469 * that's fine - as long as they are linked from the inode, the post-crash
3470 * ext4_truncate() run will find them and release them.
3471 */
3472void ext4_truncate(struct inode *inode)
3473{
3474	trace_ext4_truncate_enter(inode);
3475
3476	if (!ext4_can_truncate(inode))
3477		return;
3478
3479	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3480
3481	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3482		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3483
3484	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3485		ext4_ext_truncate(inode);
3486	else
3487		ext4_ind_truncate(inode);
3488
3489	trace_ext4_truncate_exit(inode);
3490}
3491
3492/*
3493 * ext4_get_inode_loc returns with an extra refcount against the inode's
3494 * underlying buffer_head on success. If 'in_mem' is true, we have all
3495 * data in memory that is needed to recreate the on-disk version of this
3496 * inode.
3497 */
3498static int __ext4_get_inode_loc(struct inode *inode,
3499				struct ext4_iloc *iloc, int in_mem)
3500{
3501	struct ext4_group_desc	*gdp;
3502	struct buffer_head	*bh;
3503	struct super_block	*sb = inode->i_sb;
3504	ext4_fsblk_t		block;
3505	int			inodes_per_block, inode_offset;
3506
3507	iloc->bh = NULL;
3508	if (!ext4_valid_inum(sb, inode->i_ino))
3509		return -EIO;
3510
3511	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3512	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3513	if (!gdp)
3514		return -EIO;
3515
3516	/*
3517	 * Figure out the offset within the block group inode table
3518	 */
3519	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3520	inode_offset = ((inode->i_ino - 1) %
3521			EXT4_INODES_PER_GROUP(sb));
3522	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3523	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3524
3525	bh = sb_getblk(sb, block);
3526	if (!bh) {
3527		EXT4_ERROR_INODE_BLOCK(inode, block,
3528				       "unable to read itable block");
3529		return -EIO;
3530	}
3531	if (!buffer_uptodate(bh)) {
3532		lock_buffer(bh);
3533
3534		/*
3535		 * If the buffer has the write error flag, we have failed
3536		 * to write out another inode in the same block.  In this
3537		 * case, we don't have to read the block because we can
3538		 * still read the old inode data successfully.
3539		 */
3540		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3541			set_buffer_uptodate(bh);
3542
3543		if (buffer_uptodate(bh)) {
3544			/* someone brought it uptodate while we waited */
3545			unlock_buffer(bh);
3546			goto has_buffer;
3547		}
3548
3549		/*
3550		 * If we have all information of the inode in memory and this
3551		 * is the only valid inode in the block, we need not read the
3552		 * block.
3553		 */
3554		if (in_mem) {
3555			struct buffer_head *bitmap_bh;
3556			int i, start;
3557
3558			start = inode_offset & ~(inodes_per_block - 1);
3559
3560			/* Is the inode bitmap in cache? */
3561			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3562			if (!bitmap_bh)
3563				goto make_io;
3564
3565			/*
3566			 * If the inode bitmap isn't in cache then the
3567			 * optimisation may end up performing two reads instead
3568			 * of one, so skip it.
3569			 */
3570			if (!buffer_uptodate(bitmap_bh)) {
3571				brelse(bitmap_bh);
3572				goto make_io;
3573			}
3574			for (i = start; i < start + inodes_per_block; i++) {
3575				if (i == inode_offset)
3576					continue;
3577				if (ext4_test_bit(i, bitmap_bh->b_data))
3578					break;
3579			}
3580			brelse(bitmap_bh);
3581			if (i == start + inodes_per_block) {
3582				/* all other inodes are free, so skip I/O */
3583				memset(bh->b_data, 0, bh->b_size);
3584				set_buffer_uptodate(bh);
3585				unlock_buffer(bh);
3586				goto has_buffer;
3587			}
3588		}
3589
3590make_io:
3591		/*
3592		 * If we need to do any I/O, try to pre-readahead extra
3593		 * blocks from the inode table.
3594		 */
3595		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3596			ext4_fsblk_t b, end, table;
3597			unsigned num;
3598
3599			table = ext4_inode_table(sb, gdp);
3600			/* s_inode_readahead_blks is always a power of 2 */
3601			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3602			if (table > b)
3603				b = table;
3604			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3605			num = EXT4_INODES_PER_GROUP(sb);
3606			if (ext4_has_group_desc_csum(sb))
3607				num -= ext4_itable_unused_count(sb, gdp);
3608			table += num / inodes_per_block;
3609			if (end > table)
3610				end = table;
3611			while (b <= end)
3612				sb_breadahead(sb, b++);
3613		}
3614
3615		/*
3616		 * There are other valid inodes in the buffer, this inode
3617		 * has in-inode xattrs, or we don't have this inode in memory.
3618		 * Read the block from disk.
3619		 */
3620		trace_ext4_load_inode(inode);
3621		get_bh(bh);
3622		bh->b_end_io = end_buffer_read_sync;
3623		submit_bh(READ | REQ_META | REQ_PRIO, bh);
3624		wait_on_buffer(bh);
3625		if (!buffer_uptodate(bh)) {
3626			EXT4_ERROR_INODE_BLOCK(inode, block,
3627					       "unable to read itable block");
3628			brelse(bh);
3629			return -EIO;
3630		}
3631	}
3632has_buffer:
3633	iloc->bh = bh;
3634	return 0;
3635}
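/*
 * Worked example (not from the kernel sources) of the location
 * arithmetic above, assuming 256-byte inodes, 16 inodes per block and
 * 8192 inodes per group.  For ino == 8200:
 *
 *	block_group  = (8200 - 1) / 8192	= 1
 *	inode_offset = (8200 - 1) % 8192	= 7
 *	block        = inode_table + 7 / 16	= inode_table + 0
 *	iloc->offset = (7 % 16) * 256		= 1792
 */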
3636
3637int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3638{
3639	/* We have all inode data except xattrs in memory here. */
3640	return __ext4_get_inode_loc(inode, iloc,
3641		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3642}
3643
3644void ext4_set_inode_flags(struct inode *inode)
3645{
3646	unsigned int flags = EXT4_I(inode)->i_flags;
3647
3648	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3649	if (flags & EXT4_SYNC_FL)
3650		inode->i_flags |= S_SYNC;
3651	if (flags & EXT4_APPEND_FL)
3652		inode->i_flags |= S_APPEND;
3653	if (flags & EXT4_IMMUTABLE_FL)
3654		inode->i_flags |= S_IMMUTABLE;
3655	if (flags & EXT4_NOATIME_FL)
3656		inode->i_flags |= S_NOATIME;
3657	if (flags & EXT4_DIRSYNC_FL)
3658		inode->i_flags |= S_DIRSYNC;
3659}
3660
3661/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3662void ext4_get_inode_flags(struct ext4_inode_info *ei)
3663{
3664	unsigned int vfs_fl;
3665	unsigned long old_fl, new_fl;
3666
3667	do {
3668		vfs_fl = ei->vfs_inode.i_flags;
3669		old_fl = ei->i_flags;
3670		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3671				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3672				EXT4_DIRSYNC_FL);
3673		if (vfs_fl & S_SYNC)
3674			new_fl |= EXT4_SYNC_FL;
3675		if (vfs_fl & S_APPEND)
3676			new_fl |= EXT4_APPEND_FL;
3677		if (vfs_fl & S_IMMUTABLE)
3678			new_fl |= EXT4_IMMUTABLE_FL;
3679		if (vfs_fl & S_NOATIME)
3680			new_fl |= EXT4_NOATIME_FL;
3681		if (vfs_fl & S_DIRSYNC)
3682			new_fl |= EXT4_DIRSYNC_FL;
3683	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3684}
3685
3686static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3687				  struct ext4_inode_info *ei)
3688{
3689	blkcnt_t i_blocks;
3690	struct inode *inode = &(ei->vfs_inode);
3691	struct super_block *sb = inode->i_sb;
3692
3693	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3694				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3695		/* we are using combined 48 bit field */
3696		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3697					le32_to_cpu(raw_inode->i_blocks_lo);
3698		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3699			/* i_blocks is in units of file system blocks */
3700			return i_blocks << (inode->i_blkbits - 9);
3701		} else {
3702			return i_blocks;
3703		}
3704	} else {
3705		return le32_to_cpu(raw_inode->i_blocks_lo);
3706	}
3707}
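/*
 * Worked example (not from the kernel sources) of the decoding above.
 * With the HUGE_FILE feature, i_blocks_high == 0x0001 and
 * i_blocks_lo == 0 combine to 2^32 units.  Without the per-inode
 * EXT4_INODE_HUGE_FILE flag those units are 512-byte sectors; with the
 * flag they are filesystem blocks, so on a 4KiB-block filesystem
 * (i_blkbits == 12) the value is shifted left by 12 - 9 = 3, i.e.
 * multiplied by 8 to convert back to sectors.
 */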
3708
3709struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3710{
3711	struct ext4_iloc iloc;
3712	struct ext4_inode *raw_inode;
3713	struct ext4_inode_info *ei;
3714	struct inode *inode;
3715	journal_t *journal = EXT4_SB(sb)->s_journal;
3716	long ret;
3717	int block;
3718	uid_t i_uid;
3719	gid_t i_gid;
3720
3721	inode = iget_locked(sb, ino);
3722	if (!inode)
3723		return ERR_PTR(-ENOMEM);
3724	if (!(inode->i_state & I_NEW))
3725		return inode;
3726
3727	ei = EXT4_I(inode);
3728	iloc.bh = NULL;
3729
3730	ret = __ext4_get_inode_loc(inode, &iloc, 0);
3731	if (ret < 0)
3732		goto bad_inode;
3733	raw_inode = ext4_raw_inode(&iloc);
3734
3735	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3736		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3737		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3738		    EXT4_INODE_SIZE(inode->i_sb)) {
3739			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3740				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3741				EXT4_INODE_SIZE(inode->i_sb));
3742			ret = -EIO;
3743			goto bad_inode;
3744		}
3745	} else
3746		ei->i_extra_isize = 0;
3747
3748	/* Precompute checksum seed for inode metadata */
3749	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3750			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3751		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3752		__u32 csum;
3753		__le32 inum = cpu_to_le32(inode->i_ino);
3754		__le32 gen = raw_inode->i_generation;
3755		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3756				   sizeof(inum));
3757		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3758					      sizeof(gen));
3759	}
3760
3761	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3762		EXT4_ERROR_INODE(inode, "checksum invalid");
3763		ret = -EIO;
3764		goto bad_inode;
3765	}
3766
3767	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3768	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3769	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3770	if (!(test_opt(inode->i_sb, NO_UID32))) {
3771		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3772		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3773	}
3774	i_uid_write(inode, i_uid);
3775	i_gid_write(inode, i_gid);
3776	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3777
3778	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
3779	ei->i_dir_start_lookup = 0;
3780	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3781	/* We now have enough fields to check if the inode was active or not.
3782	 * This is needed because nfsd might try to access dead inodes;
3783	 * the test is the same one that e2fsck uses.
3784	 * NeilBrown 1999oct15
3785	 */
3786	if (inode->i_nlink == 0) {
3787		if (inode->i_mode == 0 ||
3788		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3789			/* this inode is deleted */
3790			ret = -ESTALE;
3791			goto bad_inode;
3792		}
3793		/* The only unlinked inodes we let through here have
3794		 * valid i_mode and are being read by the orphan
3795		 * recovery code: that's fine, we're about to complete
3796		 * the process of deleting those. */
3797	}
3798	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
3799	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
3800	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3801	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3802		ei->i_file_acl |=
3803			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3804	inode->i_size = ext4_isize(raw_inode);
3805	ei->i_disksize = inode->i_size;
3806#ifdef CONFIG_QUOTA
3807	ei->i_reserved_quota = 0;
3808#endif
3809	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3810	ei->i_block_group = iloc.block_group;
3811	ei->i_last_alloc_group = ~0;
3812	/*
3813	 * NOTE! The in-memory inode i_data array is in little-endian order
3814	 * even on big-endian machines: we do NOT byteswap the block numbers!
3815	 */
3816	for (block = 0; block < EXT4_N_BLOCKS; block++)
3817		ei->i_data[block] = raw_inode->i_block[block];
3818	INIT_LIST_HEAD(&ei->i_orphan);
3819
3820	/*
3821	 * Set transaction id's of transactions that have to be committed
3822	 * to finish f[data]sync. We set them to the currently running transaction
3823	 * as we cannot be sure that the inode or some of its metadata isn't
3824	 * part of the transaction - the inode could have been reclaimed and
3825	 * now it is reread from disk.
3826	 */
3827	if (journal) {
3828		transaction_t *transaction;
3829		tid_t tid;
3830
3831		read_lock(&journal->j_state_lock);
3832		if (journal->j_running_transaction)
3833			transaction = journal->j_running_transaction;
3834		else
3835			transaction = journal->j_committing_transaction;
3836		if (transaction)
3837			tid = transaction->t_tid;
3838		else
3839			tid = journal->j_commit_sequence;
3840		read_unlock(&journal->j_state_lock);
3841		ei->i_sync_tid = tid;
3842		ei->i_datasync_tid = tid;
3843	}
3844
3845	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3846		if (ei->i_extra_isize == 0) {
3847			/* The extra space is currently unused. Use it. */
3848			ei->i_extra_isize = sizeof(struct ext4_inode) -
3849					    EXT4_GOOD_OLD_INODE_SIZE;
3850		} else {
3851			__le32 *magic = (void *)raw_inode +
3852					EXT4_GOOD_OLD_INODE_SIZE +
3853					ei->i_extra_isize;
3854			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3855				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3856		}
3857	}
3858
3859	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3860	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3861	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3862	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3863
3864	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
3865	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3866		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3867			inode->i_version |=
3868			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
3869	}
3870
3871	ret = 0;
3872	if (ei->i_file_acl &&
3873	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3874		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
3875				 ei->i_file_acl);
3876		ret = -EIO;
3877		goto bad_inode;
3878	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3879		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3880		    (S_ISLNK(inode->i_mode) &&
3881		     !ext4_inode_is_fast_symlink(inode)))
3882			/* Validate extent which is part of inode */
3883			ret = ext4_ext_check_inode(inode);
3884	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3885		   (S_ISLNK(inode->i_mode) &&
3886		    !ext4_inode_is_fast_symlink(inode))) {
3887		/* Validate block references which are part of inode */
3888		ret = ext4_ind_check_inode(inode);
3889	}
3890	if (ret)
3891		goto bad_inode;
3892
3893	if (S_ISREG(inode->i_mode)) {
3894		inode->i_op = &ext4_file_inode_operations;
3895		inode->i_fop = &ext4_file_operations;
3896		ext4_set_aops(inode);
3897	} else if (S_ISDIR(inode->i_mode)) {
3898		inode->i_op = &ext4_dir_inode_operations;
3899		inode->i_fop = &ext4_dir_operations;
3900	} else if (S_ISLNK(inode->i_mode)) {
3901		if (ext4_inode_is_fast_symlink(inode)) {
3902			inode->i_op = &ext4_fast_symlink_inode_operations;
3903			nd_terminate_link(ei->i_data, inode->i_size,
3904				sizeof(ei->i_data) - 1);
3905		} else {
3906			inode->i_op = &ext4_symlink_inode_operations;
3907			ext4_set_aops(inode);
3908		}
3909	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3910	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3911		inode->i_op = &ext4_special_inode_operations;
3912		if (raw_inode->i_block[0])
3913			init_special_inode(inode, inode->i_mode,
3914			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3915		else
3916			init_special_inode(inode, inode->i_mode,
3917			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3918	} else {
3919		ret = -EIO;
3920		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3921		goto bad_inode;
3922	}
3923	brelse(iloc.bh);
3924	ext4_set_inode_flags(inode);
3925	unlock_new_inode(inode);
3926	return inode;
3927
3928bad_inode:
3929	brelse(iloc.bh);
3930	iget_failed(inode);
3931	return ERR_PTR(ret);
3932}
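/*
 * Usage sketch (not from the kernel sources): ext4_iget() returns an
 * ERR_PTR() on failure rather than NULL, so callers such as the lookup
 * path check it along these lines:
 */
#if 0	/* example only */
static struct inode *example_lookup(struct inode *dir, unsigned long ino)
{
	struct inode *inode = ext4_iget(dir->i_sb, ino);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return inode;
}
#endif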
3933
3934static int ext4_inode_blocks_set(handle_t *handle,
3935				struct ext4_inode *raw_inode,
3936				struct ext4_inode_info *ei)
3937{
3938	struct inode *inode = &(ei->vfs_inode);
3939	u64 i_blocks = inode->i_blocks;
3940	struct super_block *sb = inode->i_sb;
3941
3942	if (i_blocks <= ~0U) {
3943		/*
3944		 * i_blocks can be represented in a 32 bit variable
3945		 * as multiple of 512 bytes
3946		 */
3947		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3948		raw_inode->i_blocks_high = 0;
3949		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3950		return 0;
3951	}
3952	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3953		return -EFBIG;
3954
3955	if (i_blocks <= 0xffffffffffffULL) {
3956		/*
3957		 * i_blocks can be represented in a 48 bit variable
3958		 * as multiple of 512 bytes
3959		 */
3960		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3961		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3962		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3963	} else {
3964		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3965		/* i_blocks is stored in units of file system blocks */
3966		i_blocks = i_blocks >> (inode->i_blkbits - 9);
3967		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3968		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3969	}
3970	return 0;
3971}
3972
3973/*
3974 * Post the struct inode info into an on-disk inode location in the
3975 * buffer-cache.  This gobbles the caller's reference to the
3976 * buffer_head in the inode location struct.
3977 *
3978 * The caller must have write access to iloc->bh.
3979 */
3980static int ext4_do_update_inode(handle_t *handle,
3981				struct inode *inode,
3982				struct ext4_iloc *iloc)
3983{
3984	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3985	struct ext4_inode_info *ei = EXT4_I(inode);
3986	struct buffer_head *bh = iloc->bh;
3987	int err = 0, rc, block;
3988	uid_t i_uid;
3989	gid_t i_gid;
3990
3991	/* For fields not tracked in the in-memory inode,
3992	 * initialise them to zero for new inodes. */
3993	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
3994		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
3995
3996	ext4_get_inode_flags(ei);
3997	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3998	i_uid = i_uid_read(inode);
3999	i_gid = i_gid_read(inode);
4000	if (!(test_opt(inode->i_sb, NO_UID32))) {
4001		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4002		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4003/*
4004 * Fix up interoperability with old kernels. Otherwise, old inodes get
4005 * re-used with the upper 16 bits of the uid/gid intact
4006 */
4007		if (!ei->i_dtime) {
4008			raw_inode->i_uid_high =
4009				cpu_to_le16(high_16_bits(i_uid));
4010			raw_inode->i_gid_high =
4011				cpu_to_le16(high_16_bits(i_gid));
4012		} else {
4013			raw_inode->i_uid_high = 0;
4014			raw_inode->i_gid_high = 0;
4015		}
4016	} else {
4017		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4018		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4019		raw_inode->i_uid_high = 0;
4020		raw_inode->i_gid_high = 0;
4021	}
4022	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4023
4024	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4025	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4026	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4027	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4028
4029	if (ext4_inode_blocks_set(handle, raw_inode, ei))
4030		goto out_brelse;
4031	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4032	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4033	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4034	    cpu_to_le32(EXT4_OS_HURD))
4035		raw_inode->i_file_acl_high =
4036			cpu_to_le16(ei->i_file_acl >> 32);
4037	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4038	ext4_isize_set(raw_inode, ei->i_disksize);
4039	if (ei->i_disksize > 0x7fffffffULL) {
4040		struct super_block *sb = inode->i_sb;
4041		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4042				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4043				EXT4_SB(sb)->s_es->s_rev_level ==
4044				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4045			/* If this is the first large file
4046			 * created, add a flag to the superblock.
4047			 */
4048			err = ext4_journal_get_write_access(handle,
4049					EXT4_SB(sb)->s_sbh);
4050			if (err)
4051				goto out_brelse;
4052			ext4_update_dynamic_rev(sb);
4053			EXT4_SET_RO_COMPAT_FEATURE(sb,
4054					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4055			ext4_handle_sync(handle);
4056			err = ext4_handle_dirty_super_now(handle, sb);
4057		}
4058	}
4059	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4060	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4061		if (old_valid_dev(inode->i_rdev)) {
4062			raw_inode->i_block[0] =
4063				cpu_to_le32(old_encode_dev(inode->i_rdev));
4064			raw_inode->i_block[1] = 0;
4065		} else {
4066			raw_inode->i_block[0] = 0;
4067			raw_inode->i_block[1] =
4068				cpu_to_le32(new_encode_dev(inode->i_rdev));
4069			raw_inode->i_block[2] = 0;
4070		}
4071	} else
4072		for (block = 0; block < EXT4_N_BLOCKS; block++)
4073			raw_inode->i_block[block] = ei->i_data[block];
4074
4075	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4076	if (ei->i_extra_isize) {
4077		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4078			raw_inode->i_version_hi =
4079			cpu_to_le32(inode->i_version >> 32);
4080		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4081	}
4082
4083	ext4_inode_csum_set(inode, raw_inode, ei);
4084
4085	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4086	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4087	if (!err)
4088		err = rc;
4089	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4090
4091	ext4_update_inode_fsync_trans(handle, inode, 0);
4092out_brelse:
4093	brelse(bh);
4094	ext4_std_error(inode->i_sb, err);
4095	return err;
4096}
4097
4098/*
4099 * ext4_write_inode()
4100 *
4101 * We are called from a few places:
4102 *
4103 * - Within generic_file_write() for O_SYNC files.
4104 *   Here, there will be no transaction running. We wait for any running
4105 *   transaction to commit.
4106 *
4107 * - Within sys_sync(), kupdate and such.
4108 *   We wait on commit, if told to.
4109 *
4110 * - Within prune_icache() (PF_MEMALLOC == true)
4111 *   Here we simply return.  We can't afford to block kswapd on the
4112 *   journal commit.
4113 *
4114 * In all cases it is actually safe for us to return without doing anything,
4115 * because the inode has been copied into a raw inode buffer in
4116 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4117 * knfsd.
4118 *
4119 * Note that we are absolutely dependent upon all inode dirtiers doing the
4120 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4121 * which we are interested.
4122 *
4123 * It would be a bug for them to not do this.  The code:
4124 *
4125 *	mark_inode_dirty(inode)
4126 *	stuff();
4127 *	inode->i_size = expr;
4128 *
4129 * is in error because a kswapd-driven write_inode() could occur while
4130 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4131 * will no longer be on the superblock's dirty inode list.
4132 */
4133int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4134{
4135	int err;
4136
4137	if (current->flags & PF_MEMALLOC)
4138		return 0;
4139
4140	if (EXT4_SB(inode->i_sb)->s_journal) {
4141		if (ext4_journal_current_handle()) {
4142			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4143			dump_stack();
4144			return -EIO;
4145		}
4146
4147		if (wbc->sync_mode != WB_SYNC_ALL)
4148			return 0;
4149
4150		err = ext4_force_commit(inode->i_sb);
4151	} else {
4152		struct ext4_iloc iloc;
4153
4154		err = __ext4_get_inode_loc(inode, &iloc, 0);
4155		if (err)
4156			return err;
4157		if (wbc->sync_mode == WB_SYNC_ALL)
4158			sync_dirty_buffer(iloc.bh);
4159		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4160			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4161					 "IO error syncing inode");
4162			err = -EIO;
4163		}
4164		brelse(iloc.bh);
4165	}
4166	return err;
4167}
4168
4169/*
4170 * ext4_setattr()
4171 *
4172 * Called from notify_change.
4173 *
4174 * We want to trap VFS attempts to truncate the file as soon as
4175 * possible.  In particular, we want to make sure that when the VFS
4176 * shrinks i_size, we put the inode on the orphan list and modify
4177 * i_disksize immediately, so that during the subsequent flushing of
4178 * dirty pages and freeing of disk blocks, we can guarantee that any
4179 * commit will leave the blocks being flushed in an unused state on
4180 * disk.  (On recovery, the inode will get truncated and the blocks will
4181 * be freed, so we have a strong guarantee that no future commit will
4182 * leave these blocks visible to the user.)
4183 *
4184 * Another thing we have to assure is that if we are in ordered mode
4185 * and the inode is still attached to the committing transaction, we must
4186 * start writeout of all the dirty pages which are being truncated.
4187 * This way we are sure that all the data written in the previous
4188 * transaction are already on disk (truncate waits for pages under
4189 * writeback).
4190 *
4191 * Called with inode->i_mutex down.
4192 */
4193int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4194{
4195	struct inode *inode = dentry->d_inode;
4196	int error, rc = 0;
4197	int orphan = 0;
4198	const unsigned int ia_valid = attr->ia_valid;
4199
4200	error = inode_change_ok(inode, attr);
4201	if (error)
4202		return error;
4203
4204	if (is_quota_modification(inode, attr))
4205		dquot_initialize(inode);
4206	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4207	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4208		handle_t *handle;
4209
4210		/* (user+group)*(old+new) structure, inode write (sb,
4211		 * inode block, ? - but truncate inode update has it) */
4212		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4213					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4214		if (IS_ERR(handle)) {
4215			error = PTR_ERR(handle);
4216			goto err_out;
4217		}
4218		error = dquot_transfer(inode, attr);
4219		if (error) {
4220			ext4_journal_stop(handle);
4221			return error;
4222		}
4223		/* Update corresponding info in inode so that everything is in
4224		 * one transaction */
4225		if (attr->ia_valid & ATTR_UID)
4226			inode->i_uid = attr->ia_uid;
4227		if (attr->ia_valid & ATTR_GID)
4228			inode->i_gid = attr->ia_gid;
4229		error = ext4_mark_inode_dirty(handle, inode);
4230		ext4_journal_stop(handle);
4231	}
4232
4233	if (attr->ia_valid & ATTR_SIZE) {
4234		inode_dio_wait(inode);
4235
4236		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4237			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4238
4239			if (attr->ia_size > sbi->s_bitmap_maxbytes)
4240				return -EFBIG;
4241		}
4242	}
4243
4244	if (S_ISREG(inode->i_mode) &&
4245	    attr->ia_valid & ATTR_SIZE &&
4246	    (attr->ia_size < inode->i_size)) {
4247		handle_t *handle;
4248
4249		handle = ext4_journal_start(inode, 3);
4250		if (IS_ERR(handle)) {
4251			error = PTR_ERR(handle);
4252			goto err_out;
4253		}
4254		if (ext4_handle_valid(handle)) {
4255			error = ext4_orphan_add(handle, inode);
4256			orphan = 1;
4257		}
4258		EXT4_I(inode)->i_disksize = attr->ia_size;
4259		rc = ext4_mark_inode_dirty(handle, inode);
4260		if (!error)
4261			error = rc;
4262		ext4_journal_stop(handle);
4263
4264		if (ext4_should_order_data(inode)) {
4265			error = ext4_begin_ordered_truncate(inode,
4266							    attr->ia_size);
4267			if (error) {
4268				/* Do as much error cleanup as possible */
4269				handle = ext4_journal_start(inode, 3);
4270				if (IS_ERR(handle)) {
4271					ext4_orphan_del(NULL, inode);
4272					goto err_out;
4273				}
4274				ext4_orphan_del(handle, inode);
4275				orphan = 0;
4276				ext4_journal_stop(handle);
4277				goto err_out;
4278			}
4279		}
4280	}
4281
4282	if (attr->ia_valid & ATTR_SIZE) {
4283		if (attr->ia_size != i_size_read(inode))
4284			truncate_setsize(inode, attr->ia_size);
4285		ext4_truncate(inode);
4286	}
4287
4288	if (!rc) {
4289		setattr_copy(inode, attr);
4290		mark_inode_dirty(inode);
4291	}
4292
4293	/*
4294	 * If the call to ext4_truncate failed to get a transaction handle at
4295	 * all, we need to clean up the in-core orphan list manually.
4296	 */
4297	if (orphan && inode->i_nlink)
4298		ext4_orphan_del(NULL, inode);
4299
4300	if (!rc && (ia_valid & ATTR_MODE))
4301		rc = ext4_acl_chmod(inode);
4302
4303err_out:
4304	ext4_std_error(inode->i_sb, error);
4305	if (!error)
4306		error = rc;
4307	return error;
4308}
4309
4310int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4311		 struct kstat *stat)
4312{
4313	struct inode *inode;
4314	unsigned long delalloc_blocks;
4315
4316	inode = dentry->d_inode;
4317	generic_fillattr(inode, stat);
4318
4319	/*
4320	 * We can't update i_blocks if the block allocation is delayed;
4321	 * otherwise, in the case of a system crash before the real block
4322	 * allocation is done, we will have i_blocks inconsistent with
4323	 * the on-disk file blocks.
4324	 * We always keep i_blocks updated together with real
4325	 * allocation. But to avoid confusing the user, stat
4326	 * will return the blocks that include the delayed allocation
4327	 * blocks for this file.
4328	 */
4329	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4330				EXT4_I(inode)->i_reserved_data_blocks);
4331
4332	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4333	return 0;
4334}
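/*
 * Worked example (not from the kernel sources) of the adjustment
 * above: with 4KiB blocks (s_blocksize_bits == 12) and 10 reserved
 * delalloc blocks, stat->blocks grows by (10 << 12) >> 9 == 80
 * sectors of 512 bytes, i.e. 8 sectors per delayed block.
 */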
4335
4336static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4337{
4338	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4339		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4340	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4341}
4342
4343/*
4344 * Account for index blocks, block group bitmaps and block group
4345 * descriptor blocks if we modify data blocks and index blocks.  In the
4346 * worst case, the index blocks spread over different block groups.
4347 *
4348 * If the data blocks are discontiguous, they may spread over
4349 * different block groups too. If they are contiguous, with flexbg,
4350 * they could still cross a block group boundary.
4351 *
4352 * Also account for superblock, inode, quota and xattr blocks
4353 */
4354static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4355{
4356	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4357	int gdpblocks;
4358	int idxblocks;
4359	int ret = 0;
4360
4361	/*
4362	 * How many index blocks do we need to touch to modify nrblocks?
4363	 * The "chunk" flag indicates whether the nrblocks are
4364	 * physically contiguous on disk.
4365	 *
4366	 * Direct IO and fallocate call get_block to allocate a
4367	 * single extent at a time, so they can set the "chunk" flag.
4368	 */
4369	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4370
4371	ret = idxblocks;
4372
4373	/*
4374	 * Now let's see how many group bitmaps and group descriptors we
4375	 * need to account for.
4376	 */
4377	groups = idxblocks;
4378	if (chunk)
4379		groups += 1;
4380	else
4381		groups += nrblocks;
4382
4383	gdpblocks = groups;
4384	if (groups > ngroups)
4385		groups = ngroups;
4386	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4387		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4388
4389	/* bitmaps and block group descriptor blocks */
4390	ret += groups + gdpblocks;
4391
4392	/* Blocks for super block, inode, quota and xattr blocks */
4393	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4394
4395	return ret;
4396}
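/*
 * Worked example (not from the kernel sources), with an assumed
 * idxblocks value: for a contiguous chunk (chunk == 1) where
 * ext4_index_trans_blocks() returns 3, groups = 3 + 1 = 4.  If the
 * filesystem has at least 4 block groups and 4 group descriptor
 * blocks, the result is 3 + 4 + 4 + EXT4_META_TRANS_BLOCKS(sb).
 */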
4397
4398/*
4399 * Calculate the total number of credits to reserve to fit
4400 * the modification of a single page into a single transaction,
4401 * which may include multiple chunks of block allocations.
4402 *
4403 * This could be called via ext4_write_begin()
4404 *
4405 * We need to consider the worst case, when
4406 * there is one new block per extent.
4407 */
4408int ext4_writepage_trans_blocks(struct inode *inode)
4409{
4410	int bpp = ext4_journal_blocks_per_page(inode);
4411	int ret;
4412
4413	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4414
4415	/* Account for data blocks for journalled mode */
4416	if (ext4_should_journal_data(inode))
4417		ret += bpp;
4418	return ret;
4419}
4420
4421/*
4422 * Calculate the journal credits for a chunk of data modification.
4423 *
4424 * This is called from DIO, fallocate or whoever calls
4425 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4426 *
4427 * journal buffers for data blocks are not included here, as DIO
4428 * and fallocate do not need to journal data buffers.
4429 */
4430int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4431{
4432	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4433}
4434
4435/*
4436 * The caller must have previously called ext4_reserve_inode_write().
4437 * Given this, we know that the caller already has write access to iloc->bh.
4438 */
4439int ext4_mark_iloc_dirty(handle_t *handle,
4440			 struct inode *inode, struct ext4_iloc *iloc)
4441{
4442	int err = 0;
4443
4444	if (IS_I_VERSION(inode))
4445		inode_inc_iversion(inode);
4446
4447	/* the do_update_inode consumes one bh->b_count */
4448	get_bh(iloc->bh);
4449
4450	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4451	err = ext4_do_update_inode(handle, inode, iloc);
4452	put_bh(iloc->bh);
4453	return err;
4454}
4455
4456/*
4457 * On success, we end up with an outstanding reference count against
4458 * iloc->bh.  This _must_ be cleaned up later.
4459 */
4460
4461int
4462ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4463			 struct ext4_iloc *iloc)
4464{
4465	int err;
4466
4467	err = ext4_get_inode_loc(inode, iloc);
4468	if (!err) {
4469		BUFFER_TRACE(iloc->bh, "get_write_access");
4470		err = ext4_journal_get_write_access(handle, iloc->bh);
4471		if (err) {
4472			brelse(iloc->bh);
4473			iloc->bh = NULL;
4474		}
4475	}
4476	ext4_std_error(inode->i_sb, err);
4477	return err;
4478}
4479
4480/*
4481 * Expand an inode by new_extra_isize bytes.
4482 * Returns 0 on success or negative error number on failure.
4483 */
4484static int ext4_expand_extra_isize(struct inode *inode,
4485				   unsigned int new_extra_isize,
4486				   struct ext4_iloc iloc,
4487				   handle_t *handle)
4488{
4489	struct ext4_inode *raw_inode;
4490	struct ext4_xattr_ibody_header *header;
4491
4492	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4493		return 0;
4494
4495	raw_inode = ext4_raw_inode(&iloc);
4496
4497	header = IHDR(inode, raw_inode);
4498
4499	/* No extended attributes present */
4500	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4501	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4502		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4503			new_extra_isize);
4504		EXT4_I(inode)->i_extra_isize = new_extra_isize;
4505		return 0;
4506	}
4507
4508	/* try to expand with EAs present */
4509	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4510					  raw_inode, handle);
4511}
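/*
 * Layout sketch (not from the kernel sources) of the region being
 * manipulated above, for an assumed 256-byte on-disk inode:
 *
 *	bytes 0..127		classic inode (EXT4_GOOD_OLD_INODE_SIZE)
 *	128..128+i_extra_isize	extra fields (nanosecond times, etc.)
 *	remainder		in-inode xattrs, led by EXT4_XATTR_MAGIC
 *
 * Growing i_extra_isize therefore has to shift the xattr area up,
 * which is what ext4_expand_extra_isize_ea() does when xattrs are
 * present.
 */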
4512
4513/*
4514 * What we do here is to mark the in-core inode as clean with respect to inode
4515 * dirtiness (it may still be data-dirty).
4516 * This means that the in-core inode may be reaped by prune_icache
4517 * without having to perform any I/O.  This is a very good thing,
4518 * because *any* task may call prune_icache - even ones which
4519 * have a transaction open against a different journal.
4520 *
4521 * Is this cheating?  Not really.  Sure, we haven't written the
4522 * inode out, but prune_icache isn't a user-visible syncing function.
4523 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4524 * we start and wait on commits.
4525 *
4526 * Is this efficient/effective?  Well, we're being nice to the system
4527 * by cleaning up our inodes proactively so they can be reaped
4528 * without I/O.  But we are potentially leaving up to five seconds'
4529 * worth of inodes floating about which prune_icache wants us to
4530 * write out.  One way to fix that would be to get prune_icache()
4531 * to do a write_super() to free up some memory.  It has the desired
4532 * effect.
4533 */
4534int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4535{
4536	struct ext4_iloc iloc;
4537	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4538	static unsigned int mnt_count;
4539	int err, ret;
4540
4541	might_sleep();
4542	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4543	err = ext4_reserve_inode_write(handle, inode, &iloc);
4544	if (ext4_handle_valid(handle) &&
4545	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4546	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4547		/*
4548		 * We need extra buffer credits since we may write into EA block
4549		 * with this same handle. If journal_extend fails, then it will
4550		 * only result in a minor loss of functionality for that inode.
4551		 * If this is felt to be critical, then e2fsck should be run to
4552		 * force a large enough s_min_extra_isize.
4553		 */
4554		if ((jbd2_journal_extend(handle,
4555			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4556			ret = ext4_expand_extra_isize(inode,
4557						      sbi->s_want_extra_isize,
4558						      iloc, handle);
4559			if (ret) {
4560				ext4_set_inode_state(inode,
4561						     EXT4_STATE_NO_EXPAND);
4562				if (mnt_count !=
4563					le16_to_cpu(sbi->s_es->s_mnt_count)) {
4564					ext4_warning(inode->i_sb,
4565					"Unable to expand inode %lu. Delete"
4566					" some EAs or run e2fsck.",
4567					inode->i_ino);
4568					mnt_count =
4569					  le16_to_cpu(sbi->s_es->s_mnt_count);
4570				}
4571			}
4572		}
4573	}
4574	if (!err)
4575		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4576	return err;
4577}
4578
4579/*
4580 * ext4_dirty_inode() is called from __mark_inode_dirty()
4581 *
4582 * We're really interested in the case where a file is being extended.
4583 * i_size has been changed by generic_commit_write() and we thus need
4584 * to include the updated inode in the current transaction.
4585 *
4586 * Also, dquot_alloc_block() will always dirty the inode when blocks
4587 * are allocated to the file.
4588 *
4589 * If the inode is marked synchronous, we don't honour that here - doing
4590 * so would cause a commit on atime updates, which we don't bother doing.
4591 * We handle synchronous inodes at the highest possible level.
4592 */
4593void ext4_dirty_inode(struct inode *inode, int flags)
4594{
4595	handle_t *handle;
4596
4597	handle = ext4_journal_start(inode, 2);
4598	if (IS_ERR(handle))
4599		goto out;
4600
4601	ext4_mark_inode_dirty(handle, inode);
4602
4603	ext4_journal_stop(handle);
4604out:
4605	return;
4606}
4607
4608#if 0
4609/*
4610 * Bind an inode's backing buffer_head into this transaction, to prevent
4611 * it from being flushed to disk early.  Unlike
4612 * ext4_reserve_inode_write, this leaves behind no bh reference and
4613 * returns no iloc structure, so the caller needs to repeat the iloc
4614 * lookup to mark the inode dirty later.
4615 */
4616static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4617{
4618	struct ext4_iloc iloc;
4619
4620	int err = 0;
4621	if (handle) {
4622		err = ext4_get_inode_loc(inode, &iloc);
4623		if (!err) {
4624			BUFFER_TRACE(iloc.bh, "get_write_access");
4625			err = jbd2_journal_get_write_access(handle, iloc.bh);
4626			if (!err)
4627				err = ext4_handle_dirty_metadata(handle,
4628								 NULL,
4629								 iloc.bh);
4630			brelse(iloc.bh);
4631		}
4632	}
4633	ext4_std_error(inode->i_sb, err);
4634	return err;
4635}
4636#endif
4637
4638int ext4_change_inode_journal_flag(struct inode *inode, int val)
4639{
4640	journal_t *journal;
4641	handle_t *handle;
4642	int err;
4643
4644	/*
4645	 * We have to be very careful here: changing a data block's
4646	 * journaling status dynamically is dangerous.  If we write a
4647	 * data block to the journal, change the status and then delete
4648	 * that block, we risk forgetting to revoke the old log record
4649	 * from the journal and so a subsequent replay can corrupt data.
4650	 * So, first we make sure that the journal is empty and that
4651	 * nobody is changing anything.
4652	 */
4653
4654	journal = EXT4_JOURNAL(inode);
4655	if (!journal)
4656		return 0;
4657	if (is_journal_aborted(journal))
4658		return -EROFS;
4659	/* We have to allocate physical blocks for delalloc blocks
4660	 * before flushing the journal.  Otherwise delalloc blocks cannot
4661	 * be allocated any more, and even a truncate on delalloc blocks
4662	 * could trigger a BUG by flushing delalloc blocks in the journal.
4663	 * There is no delalloc block in non-journal data mode.
4664	 */
4665	if (val && test_opt(inode->i_sb, DELALLOC)) {
4666		err = ext4_alloc_da_blocks(inode);
4667		if (err < 0)
4668			return err;
4669	}
4670
4671	jbd2_journal_lock_updates(journal);
4672
4673	/*
4674	 * OK, there are no updates running now, and all cached data is
4675	 * synced to disk.  We are now in a completely consistent state
4676	 * which doesn't have anything in the journal, and we know that
4677	 * no filesystem updates are running, so it is safe to modify
4678	 * the inode's in-core data-journaling state flag now.
4679	 */
4680
4681	if (val)
4682		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4683	else {
4684		jbd2_journal_flush(journal);
4685		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4686	}
4687	ext4_set_aops(inode);
4688
4689	jbd2_journal_unlock_updates(journal);
4690
4691	/* Finally we can mark the inode as dirty. */
4692
4693	handle = ext4_journal_start(inode, 1);
4694	if (IS_ERR(handle))
4695		return PTR_ERR(handle);
4696
4697	err = ext4_mark_inode_dirty(handle, inode);
4698	ext4_handle_sync(handle);
4699	ext4_journal_stop(handle);
4700	ext4_std_error(inode->i_sb, err);
4701
4702	return err;
4703}
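/*
 * Illustrative sketch (not from the kernel sources): userspace reaches
 * ext4_change_inode_journal_flag() by toggling the journal-data
 * attribute with the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls, which is
 * what chattr +j/-j does.
 */
#if 0	/* example only, userspace */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int set_journal_data(const char *path)
{
	int fd = open(path, O_RDONLY);
	int flags;

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return -1;
	flags |= FS_JOURNAL_DATA_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}
#endif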
4704
4705static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
4706{
4707	return !buffer_mapped(bh);
4708}
4709
4710int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4711{
4712	struct page *page = vmf->page;
4713	loff_t size;
4714	unsigned long len;
4715	int ret;
4716	struct file *file = vma->vm_file;
4717	struct inode *inode = file->f_path.dentry->d_inode;
4718	struct address_space *mapping = inode->i_mapping;
4719	handle_t *handle;
4720	get_block_t *get_block;
4721	int retries = 0;
4722
4723	/*
4724	 * This check is racy but catches the common case. We rely on
4725	 * __block_page_mkwrite() to do a reliable check.
4726	 */
4727	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
4728	/* Delalloc case is easy... */
4729	if (test_opt(inode->i_sb, DELALLOC) &&
4730	    !ext4_should_journal_data(inode) &&
4731	    !ext4_nonda_switch(inode->i_sb)) {
4732		do {
4733			ret = __block_page_mkwrite(vma, vmf,
4734						   ext4_da_get_block_prep);
4735		} while (ret == -ENOSPC &&
4736		       ext4_should_retry_alloc(inode->i_sb, &retries));
4737		goto out_ret;
4738	}
4739
4740	lock_page(page);
4741	size = i_size_read(inode);
4742	/* Page got truncated from under us? */
4743	if (page->mapping != mapping || page_offset(page) > size) {
4744		unlock_page(page);
4745		ret = VM_FAULT_NOPAGE;
4746		goto out;
4747	}
4748
4749	if (page->index == size >> PAGE_CACHE_SHIFT)
4750		len = size & ~PAGE_CACHE_MASK;
4751	else
4752		len = PAGE_CACHE_SIZE;
4753	/*
4754	 * Return if we have all the buffers mapped. This avoids the need to do
4755	 * journal_start/journal_stop which can block and take a long time
4756	 */
4757	if (page_has_buffers(page)) {
4758		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4759					ext4_bh_unmapped)) {
4760			/* Wait so that we don't change page under IO */
4761			wait_on_page_writeback(page);
4762			ret = VM_FAULT_LOCKED;
4763			goto out;
4764		}
4765	}
4766	unlock_page(page);
4767	/* OK, we need to fill the hole... */
4768	if (ext4_should_dioread_nolock(inode))
4769		get_block = ext4_get_block_write;
4770	else
4771		get_block = ext4_get_block;
4772retry_alloc:
4773	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4774	if (IS_ERR(handle)) {
4775		ret = VM_FAULT_SIGBUS;
4776		goto out;
4777	}
4778	ret = __block_page_mkwrite(vma, vmf, get_block);
4779	if (!ret && ext4_should_journal_data(inode)) {
4780		if (walk_page_buffers(handle, page_buffers(page), 0,
4781			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
4782			unlock_page(page);
4783			ret = VM_FAULT_SIGBUS;
4784			ext4_journal_stop(handle);
4785			goto out;
4786		}
4787		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4788	}
4789	ext4_journal_stop(handle);
4790	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4791		goto retry_alloc;
4792out_ret:
4793	ret = block_page_mkwrite_return(ret);
4794out:
4795	return ret;
4796}
v3.15
   1/*
   2 *  linux/fs/ext4/inode.c
   3 *
   4 * Copyright (C) 1992, 1993, 1994, 1995
   5 * Remy Card (card@masi.ibp.fr)
   6 * Laboratoire MASI - Institut Blaise Pascal
   7 * Universite Pierre et Marie Curie (Paris VI)
   8 *
   9 *  from
  10 *
  11 *  linux/fs/minix/inode.c
  12 *
  13 *  Copyright (C) 1991, 1992  Linus Torvalds
  14 *
  15 *  64-bit file support on 64-bit platforms by Jakub Jelinek
  16 *	(jj@sunsite.ms.mff.cuni.cz)
  17 *
  18 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
  19 */
  20
  21#include <linux/fs.h>
  22#include <linux/time.h>
  23#include <linux/jbd2.h>
  24#include <linux/highuid.h>
  25#include <linux/pagemap.h>
  26#include <linux/quotaops.h>
  27#include <linux/string.h>
  28#include <linux/buffer_head.h>
  29#include <linux/writeback.h>
  30#include <linux/pagevec.h>
  31#include <linux/mpage.h>
  32#include <linux/namei.h>
  33#include <linux/uio.h>
  34#include <linux/bio.h>
  35#include <linux/workqueue.h>
  36#include <linux/kernel.h>
  37#include <linux/printk.h>
  38#include <linux/slab.h>
  39#include <linux/ratelimit.h>
  40#include <linux/aio.h>
  41#include <linux/bitops.h>
  42
  43#include "ext4_jbd2.h"
  44#include "xattr.h"
  45#include "acl.h"
  46#include "truncate.h"
  47
  48#include <trace/events/ext4.h>
  49
  50#define MPAGE_DA_EXTENT_TAIL 0x01
  51
  52static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
  53			      struct ext4_inode_info *ei)
  54{
  55	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  56	__u16 csum_lo;
  57	__u16 csum_hi = 0;
  58	__u32 csum;
  59
  60	csum_lo = le16_to_cpu(raw->i_checksum_lo);
  61	raw->i_checksum_lo = 0;
  62	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  63	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
  64		csum_hi = le16_to_cpu(raw->i_checksum_hi);
  65		raw->i_checksum_hi = 0;
  66	}
  67
  68	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
  69			   EXT4_INODE_SIZE(inode->i_sb));
  70
  71	raw->i_checksum_lo = cpu_to_le16(csum_lo);
  72	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  73	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  74		raw->i_checksum_hi = cpu_to_le16(csum_hi);
  75
  76	return csum;
  77}
  78
  79static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
  80				  struct ext4_inode_info *ei)
  81{
  82	__u32 provided, calculated;
  83
  84	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  85	    cpu_to_le32(EXT4_OS_LINUX) ||
  86	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
  87		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
  88		return 1;
  89
  90	provided = le16_to_cpu(raw->i_checksum_lo);
  91	calculated = ext4_inode_csum(inode, raw, ei);
  92	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
  93	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
  94		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
  95	else
  96		calculated &= 0xFFFF;
  97
  98	return provided == calculated;
  99}
 100
 101static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
 102				struct ext4_inode_info *ei)
 103{
 104	__u32 csum;
 105
 106	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 107	    cpu_to_le32(EXT4_OS_LINUX) ||
 108	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
 109		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
 110		return;
 111
 112	csum = ext4_inode_csum(inode, raw, ei);
 113	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 114	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
 115	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
 116		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 117}
 118
 119static inline int ext4_begin_ordered_truncate(struct inode *inode,
 120					      loff_t new_size)
 121{
 122	trace_ext4_begin_ordered_truncate(inode, new_size);
 123	/*
 124	 * If jinode is zero, then we never opened the file for
 125	 * writing, so there's no need to call
 126	 * jbd2_journal_begin_ordered_truncate() since there's no
 127	 * outstanding writes we need to flush.
 128	 */
 129	if (!EXT4_I(inode)->jinode)
 130		return 0;
 131	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 132						   EXT4_I(inode)->jinode,
 133						   new_size);
 134}
 135
 136static void ext4_invalidatepage(struct page *page, unsigned int offset,
 137				unsigned int length);
 
 
 
 138static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 139static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 140static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
 141				  int pextents);
 
 142
 143/*
 144 * Test whether an inode is a fast symlink.
 145 */
 146static int ext4_inode_is_fast_symlink(struct inode *inode)
 147{
 148        int ea_blocks = EXT4_I(inode)->i_file_acl ?
 149		EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
 150
 151	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 152}
 153
 154/*
 155 * Restart the transaction associated with *handle.  This does a commit,
 156 * so before we call here everything must be consistently dirtied against
 157 * this transaction.
 158 */
 159int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
 160				 int nblocks)
 161{
 162	int ret;
 163
 164	/*
 165	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
 166	 * moment, get_block can be called only for blocks inside i_size since
 167	 * page cache has been already dropped and writes are blocked by
 168	 * i_mutex. So we can safely drop the i_data_sem here.
 169	 */
 170	BUG_ON(EXT4_JOURNAL(inode) == NULL);
 171	jbd_debug(2, "restarting handle %p\n", handle);
 172	up_write(&EXT4_I(inode)->i_data_sem);
 173	ret = ext4_journal_restart(handle, nblocks);
 174	down_write(&EXT4_I(inode)->i_data_sem);
 175	ext4_discard_preallocations(inode);
 176
 177	return ret;
 178}
 179
 180/*
 181 * Called at the last iput() if i_nlink is zero.
 182 */
 183void ext4_evict_inode(struct inode *inode)
 184{
 185	handle_t *handle;
 186	int err;
 187
 188	trace_ext4_evict_inode(inode);
 189
 
 
 190	if (inode->i_nlink) {
 191		/*
 192		 * When journalling data, dirty buffers are tracked only in the
 193		 * journal. So although mm thinks everything is clean and
 194		 * ready for reaping, the inode might still have some pages to
 195		 * write in the running transaction or waiting to be
 196		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
 197		 * (via truncate_inode_pages()) to discard these buffers can
 198		 * cause data loss. Also, even if we did not discard these
 199		 * buffers, we would have no way to find them after the inode
 200		 * is reaped, and thus the user could see stale data if they
 201		 * try to read them before the transaction is checkpointed. So be
 202		 * careful and force everything to disk here... We use
 203		 * ei->i_datasync_tid to store the newest transaction
 204		 * containing inode's data.
 205		 *
 206		 * Note that directories do not have this problem because they
 207		 * don't use page cache.
 208		 */
 209		if (ext4_should_journal_data(inode) &&
 210		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
 211		    inode->i_ino != EXT4_JOURNAL_INO) {
 212			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 213			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 214
 215			jbd2_complete_transaction(journal, commit_tid);
 216			filemap_write_and_wait(&inode->i_data);
 217		}
 218		truncate_inode_pages_final(&inode->i_data);
 219
 220		WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
 221		goto no_delete;
 222	}
 223
 224	if (!is_bad_inode(inode))
 225		dquot_initialize(inode);
 226
 227	if (ext4_should_order_data(inode))
 228		ext4_begin_ordered_truncate(inode, 0);
 229	truncate_inode_pages_final(&inode->i_data);
 230
 231	WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
 232	if (is_bad_inode(inode))
 233		goto no_delete;
 234
 235	/*
 236	 * Protect us against freezing - iput() caller didn't have to have any
 237	 * protection against it
 238	 */
 239	sb_start_intwrite(inode->i_sb);
 240	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
 241				    ext4_blocks_for_truncate(inode)+3);
 242	if (IS_ERR(handle)) {
 243		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 244		/*
 245		 * If we're going to skip the normal cleanup, we still need to
 246		 * make sure that the in-core orphan linked list is properly
 247		 * cleaned up.
 248		 */
 249		ext4_orphan_del(NULL, inode);
 250		sb_end_intwrite(inode->i_sb);
 251		goto no_delete;
 252	}
 253
 254	if (IS_SYNC(inode))
 255		ext4_handle_sync(handle);
 256	inode->i_size = 0;
 257	err = ext4_mark_inode_dirty(handle, inode);
 258	if (err) {
 259		ext4_warning(inode->i_sb,
 260			     "couldn't mark inode dirty (err %d)", err);
 261		goto stop_handle;
 262	}
 263	if (inode->i_blocks)
 264		ext4_truncate(inode);
 265
 266	/*
 267	 * ext4_ext_truncate() doesn't reserve any slop when it
 268	 * restarts journal transactions; therefore there may not be
 269	 * enough credits left in the handle to remove the inode from
 270	 * the orphan list and set the dtime field.
 271	 */
 272	if (!ext4_handle_has_enough_credits(handle, 3)) {
 273		err = ext4_journal_extend(handle, 3);
 274		if (err > 0)
 275			err = ext4_journal_restart(handle, 3);
 276		if (err != 0) {
 277			ext4_warning(inode->i_sb,
 278				     "couldn't extend journal (err %d)", err);
 279		stop_handle:
 280			ext4_journal_stop(handle);
 281			ext4_orphan_del(NULL, inode);
 282			sb_end_intwrite(inode->i_sb);
 283			goto no_delete;
 284		}
 285	}
 286
 287	/*
 288	 * Kill off the orphan record which ext4_truncate created.
 289	 * AKPM: I think this can be inside the above `if'.
 290	 * Note that ext4_orphan_del() has to be able to cope with the
 291	 * deletion of a non-existent orphan - this is because we don't
 292	 * know if ext4_truncate() actually created an orphan record.
 293	 * (Well, we could do this if we need to, but heck - it works)
 294	 */
 295	ext4_orphan_del(handle, inode);
 296	EXT4_I(inode)->i_dtime	= get_seconds();
 297
 298	/*
 299	 * One subtle ordering requirement: if anything has gone wrong
 300	 * (transaction abort, IO errors, whatever), then we can still
 301	 * do these next steps (the fs will already have been marked as
 302	 * having errors), but we can't free the inode if the mark_dirty
 303	 * fails.
 304	 */
 305	if (ext4_mark_inode_dirty(handle, inode))
 306		/* If that failed, just do the required in-core inode clear. */
 307		ext4_clear_inode(inode);
 308	else
 309		ext4_free_inode(handle, inode);
 310	ext4_journal_stop(handle);
 311	sb_end_intwrite(inode->i_sb);
 312	return;
 313no_delete:
 314	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
 315}
 316
 317#ifdef CONFIG_QUOTA
 318qsize_t *ext4_get_reserved_space(struct inode *inode)
 319{
 320	return &EXT4_I(inode)->i_reserved_quota;
 321}
 322#endif
 323
 324/*
 325 * Calculate the number of metadata blocks we need to reserve
 326 * in order to allocate a block located at @lblock
 327 */
 328static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 329{
 330	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 331		return ext4_ext_calc_metadata_amount(inode, lblock);
 332
 333	return ext4_ind_calc_metadata_amount(inode, lblock);
 334}
 335
 336/*
 337 * Called with i_data_sem down, which is important since we can call
 338 * ext4_discard_preallocations() from here.
 339 */
 340void ext4_da_update_reserve_space(struct inode *inode,
 341					int used, int quota_claim)
 342{
 343	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 344	struct ext4_inode_info *ei = EXT4_I(inode);
 345
 346	spin_lock(&ei->i_block_reservation_lock);
 347	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 348	if (unlikely(used > ei->i_reserved_data_blocks)) {
 349		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
 350			 "with only %d reserved data blocks",
 351			 __func__, inode->i_ino, used,
 352			 ei->i_reserved_data_blocks);
 353		WARN_ON(1);
 354		used = ei->i_reserved_data_blocks;
 355	}
 356
 357	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
 358		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
 359			"with only %d reserved metadata blocks "
 360			"(releasing %d blocks with reserved %d data blocks)",
 361			inode->i_ino, ei->i_allocated_meta_blocks,
 362			     ei->i_reserved_meta_blocks, used,
 363			     ei->i_reserved_data_blocks);
 364		WARN_ON(1);
 365		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
 366	}
 367
 368	/* Update per-inode reservations */
 369	ei->i_reserved_data_blocks -= used;
 370	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
 371	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 372			   used + ei->i_allocated_meta_blocks);
 373	ei->i_allocated_meta_blocks = 0;
 374
 375	if (ei->i_reserved_data_blocks == 0) {
 376		/*
 377		 * We can release all of the reserved metadata blocks
 378		 * only when we have written all of the delayed
 379		 * allocation blocks.
 380		 */
 381		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 382				   ei->i_reserved_meta_blocks);
 383		ei->i_reserved_meta_blocks = 0;
 384		ei->i_da_metadata_calc_len = 0;
 385	}
 386	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 387
 388	/* Update quota subsystem for data blocks */
 389	if (quota_claim)
 390		dquot_claim_block(inode, EXT4_C2B(sbi, used));
 391	else {
 392		/*
 393		 * We did fallocate with an offset that is already delayed
 394		 * allocated. So on delayed allocated writeback we should
 395		 * not re-claim the quota for fallocated blocks.
 396		 */
 397		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
 398	}
 399
 400	/*
 401	 * If we have done all the pending block allocations and if
 402	 * there aren't any writers on the inode, we can discard the
 403	 * inode's preallocations.
 404	 */
 405	if ((ei->i_reserved_data_blocks == 0) &&
 406	    (atomic_read(&inode->i_writecount) == 0))
 407		ext4_discard_preallocations(inode);
 408}
 409
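/*
 * Illustrative model, not part of the original source: the bookkeeping
 * above amounts to "used" blocks leaving both the per-inode reservation
 * and the superblock's dirty-cluster counter once they are really
 * allocated.  A plain-C model of the data-block half:
 */
struct sketch_rsv {
	unsigned int reserved_data;	/* models ei->i_reserved_data_blocks */
	long long sb_dirty;		/* models sbi->s_dirtyclusters_counter */
};

static void sketch_claim_used(struct sketch_rsv *r, unsigned int used)
{
	if (used > r->reserved_data)	/* clamp, as the WARN_ON path does */
		used = r->reserved_data;
	r->reserved_data -= used;	/* per-inode reservation shrinks */
	r->sb_dirty -= used;		/* fs-wide dirty counter shrinks too */
}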
 410static int __check_block_validity(struct inode *inode, const char *func,
 411				unsigned int line,
 412				struct ext4_map_blocks *map)
 413{
 414	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 415				   map->m_len)) {
 416		ext4_error_inode(inode, func, line, map->m_pblk,
 417				 "lblock %lu mapped to illegal pblock "
 418				 "(length %d)", (unsigned long) map->m_lblk,
 419				 map->m_len);
 420		return -EIO;
 421	}
 422	return 0;
 423}
 424
 425#define check_block_validity(inode, map)	\
 426	__check_block_validity((inode), __func__, __LINE__, (map))
 427
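/*
 * Illustrative note, not part of the original source: the wrapper macro
 * above captures the *caller's* __func__ and __LINE__, so the error is
 * reported against the call site rather than __check_block_validity()
 * itself.  The same pattern in miniature:
 */
#define sketch_report(msg)	__sketch_report(__func__, __LINE__, (msg))

static void __sketch_report(const char *func, unsigned int line,
			    const char *msg)
{
	/* func/line identify whoever invoked sketch_report() */
	printk(KERN_DEBUG "%s:%u: %s\n", func, line, msg);
}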
 428#ifdef ES_AGGRESSIVE_TEST
 429static void ext4_map_blocks_es_recheck(handle_t *handle,
 430				       struct inode *inode,
 431				       struct ext4_map_blocks *es_map,
 432				       struct ext4_map_blocks *map,
 433				       int flags)
 434{
 435	int retval;
 436
 437	map->m_flags = 0;
 438	/*
 439	 * There is a race window in which the result may not be the same,
 440	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
 441	 * is that we look up the block mapping in the extent status tree
 442	 * without taking i_data_sem, so in the meantime the unwritten
 443	 * extent could have been converted.
 444	 */
 445	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
 446		down_read((&EXT4_I(inode)->i_data_sem));
 447	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 448		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 449					     EXT4_GET_BLOCKS_KEEP_SIZE);
 450	} else {
 451		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 452					     EXT4_GET_BLOCKS_KEEP_SIZE);
 453	}
 454	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
 455		up_read((&EXT4_I(inode)->i_data_sem));
 456	/*
 457	 * Clear the EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flags
 458	 * because they shouldn't be marked in es_map->m_flags.
 459	 */
 460	map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
 461
 462	/*
 463	 * We don't check m_len because the extent will be collapsed in the
 464	 * status tree, so the m_len values might not be equal.
 465	 */
 466	if (es_map->m_lblk != map->m_lblk ||
 467	    es_map->m_flags != map->m_flags ||
 468	    es_map->m_pblk != map->m_pblk) {
 469		printk("ES cache assertion failed for inode: %lu "
 470		       "es_cached ex [%d/%d/%llu/%x] != "
 471		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
 472		       inode->i_ino, es_map->m_lblk, es_map->m_len,
 473		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
 474		       map->m_len, map->m_pblk, map->m_flags,
 475		       retval, flags);
 476	}
 477}
 478#endif /* ES_AGGRESSIVE_TEST */
 479
 480/*
 481 * The ext4_map_blocks() function tries to look up the requested blocks,
 482 * and returns the mapping if the blocks are already mapped.
 483 *
 484 * Otherwise it takes the write lock of i_data_sem, allocates the
 485 * blocks, stores them in the result buffer head, and marks it
 486 * mapped.
 487 *
 488 * If the file is extent based, it calls ext4_ext_map_blocks();
 489 * otherwise, it calls ext4_ind_map_blocks() to handle
 490 * indirect-mapped files.
 491 *
 492 * On success, it returns the number of blocks mapped or allocated.
 493 * If create == 0 and the blocks are pre-allocated and uninitialized,
 494 * the result buffer head is unmapped.  If create == 1, it will
 495 * make sure the buffer head is mapped.
 496 *
 497 * It returns 0 if a plain lookup failed (the blocks have not been
 498 * allocated); in that case, the buffer head is unmapped.
 499 *
 500 * It returns the error in case of allocation failure.
 501 */
 502int ext4_map_blocks(handle_t *handle, struct inode *inode,
 503		    struct ext4_map_blocks *map, int flags)
 504{
 505	struct extent_status es;
 506	int retval;
 507	int ret = 0;
 508#ifdef ES_AGGRESSIVE_TEST
 509	struct ext4_map_blocks orig_map;
 510
 511	memcpy(&orig_map, map, sizeof(*map));
 512#endif
 513
 514	map->m_flags = 0;
 515	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
 516		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
 517		  (unsigned long) map->m_lblk);
 518
 519	/*
 520	 * ext4_map_blocks returns an int, and m_len is an unsigned int
 521	 */
 522	if (unlikely(map->m_len > INT_MAX))
 523		map->m_len = INT_MAX;
 524
 525	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
 526	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
 527		return -EIO;
 528
 529	/* Lookup extent status tree firstly */
 530	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
 531		ext4_es_lru_add(inode);
 532		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
 533			map->m_pblk = ext4_es_pblock(&es) +
 534					map->m_lblk - es.es_lblk;
 535			map->m_flags |= ext4_es_is_written(&es) ?
 536					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
 537			retval = es.es_len - (map->m_lblk - es.es_lblk);
 538			if (retval > map->m_len)
 539				retval = map->m_len;
 540			map->m_len = retval;
 541		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
 542			retval = 0;
 543		} else {
 544			BUG_ON(1);
 545		}
 546#ifdef ES_AGGRESSIVE_TEST
 547		ext4_map_blocks_es_recheck(handle, inode, map,
 548					   &orig_map, flags);
 549#endif
 550		goto found;
 551	}
 552
 553	/*
 554	 * Try to see if we can get the block without requesting a new
 555	 * file system block.
 556	 */
 557	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
 558		down_read((&EXT4_I(inode)->i_data_sem));
 559	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 560		retval = ext4_ext_map_blocks(handle, inode, map, flags &
 561					     EXT4_GET_BLOCKS_KEEP_SIZE);
 562	} else {
 563		retval = ext4_ind_map_blocks(handle, inode, map, flags &
 564					     EXT4_GET_BLOCKS_KEEP_SIZE);
 565	}
 566	if (retval > 0) {
 567		unsigned int status;
 568
 569		if (unlikely(retval != map->m_len)) {
 570			ext4_warning(inode->i_sb,
 571				     "ES len assertion failed for inode "
 572				     "%lu: retval %d != map->m_len %d",
 573				     inode->i_ino, retval, map->m_len);
 574			WARN_ON(1);
 575		}
 576
 577		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 578				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 579		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 580		    ext4_find_delalloc_range(inode, map->m_lblk,
 581					     map->m_lblk + map->m_len - 1))
 582			status |= EXTENT_STATUS_DELAYED;
 583		ret = ext4_es_insert_extent(inode, map->m_lblk,
 584					    map->m_len, map->m_pblk, status);
 585		if (ret < 0)
 586			retval = ret;
 587	}
 588	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
 589		up_read((&EXT4_I(inode)->i_data_sem));
 590
 591found:
 592	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 593		ret = check_block_validity(inode, map);
 594		if (ret != 0)
 595			return ret;
 596	}
 597
 598	/* If it is only a block(s) lookup */
 599	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
 600		return retval;
 601
 602	/*
 603	 * Return if the blocks have already been allocated.
 604	 *
 605	 * Note that if blocks have been preallocated,
 606	 * ext4_ext_get_block() returns with create == 0
 607	 * and the buffer head unmapped.
 608	 */
 609	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 610		/*
 611		 * If we need to convert extent to unwritten
 612		 * we continue and do the actual work in
 613		 * ext4_ext_map_blocks()
 614		 */
 615		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
 616			return retval;
 617
 618	/*
 619	 * Here we clear m_flags because after allocating a new extent,
 620	 * it will be set again.
 621	 */
 622	map->m_flags &= ~EXT4_MAP_FLAGS;
 623
 624	/*
 625	 * New block allocation and/or writing to an uninitialized extent
 626	 * will possibly result in updating i_data, so we take
 627	 * the write lock of i_data_sem and call get_blocks()
 628	 * with the create == 1 flag.
 629	 */
 630	down_write((&EXT4_I(inode)->i_data_sem));
 631
 632	/*
 633	 * If the caller is from the delayed allocation writeout path,
 634	 * we have already reserved fs blocks for the allocation;
 635	 * let the underlying get_block() function know, so it can
 636	 * avoid double accounting.
 637	 */
 638	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
 639		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 640	/*
 641	 * We need to check for EXT4 here because migrate
 642	 * could have changed the inode type in between
 643	 */
 644	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
 645		retval = ext4_ext_map_blocks(handle, inode, map, flags);
 646	} else {
 647		retval = ext4_ind_map_blocks(handle, inode, map, flags);
 648
 649		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
 650			/*
 651			 * We allocated new blocks which will result in
 652			 * i_data's format changing.  Force the migrate
 653			 * to fail by clearing migrate flags
 654			 */
 655			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
 656		}
 657
 658		/*
 659		 * Update reserved data/metadata blocks after a successful
 660		 * block allocation, which had been deferred till now. We don't
 661		 * support fallocate for non-extent files, so we can update
 662		 * the reserved space here.
 663		 */
 664		if ((retval > 0) &&
 665			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
 666			ext4_da_update_reserve_space(inode, retval, 1);
 667	}
 668	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
 669		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 670
 671	if (retval > 0) {
 672		unsigned int status;
 673
 674		if (unlikely(retval != map->m_len)) {
 675			ext4_warning(inode->i_sb,
 676				     "ES len assertion failed for inode "
 677				     "%lu: retval %d != map->m_len %d",
 678				     inode->i_ino, retval, map->m_len);
 679			WARN_ON(1);
 680		}
 681
 682		/*
 683		 * If the extent has been zeroed out, we don't need to update
 684		 * extent status tree.
 685		 */
 686		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
 687		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
 688			if (ext4_es_is_written(&es))
 689				goto has_zeroout;
 690		}
 691		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
 692				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
 693		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
 694		    ext4_find_delalloc_range(inode, map->m_lblk,
 695					     map->m_lblk + map->m_len - 1))
 696			status |= EXTENT_STATUS_DELAYED;
 697		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 698					    map->m_pblk, status);
 699		if (ret < 0)
 700			retval = ret;
 701	}
 702
 703has_zeroout:
 704	up_write((&EXT4_I(inode)->i_data_sem));
 705	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 706		ret = check_block_validity(inode, map);
 707		if (ret != 0)
 708			return ret;
 709	}
 710	return retval;
 711}
 712
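/*
 * Illustrative caller sketch, not part of the original source:
 * _ext4_get_block() below is the canonical in-tree user; the minimal
 * shape of a lookup-only call (create == 0, so no allocation and no
 * handle needed) looks like this.
 */
static int sketch_lookup_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_map_blocks map;

	map.m_lblk = lblk;	/* logical block to resolve */
	map.m_len = 1;		/* a single block */
	/* flags == 0: pure lookup; returns 0 if nothing is allocated */
	return ext4_map_blocks(NULL, inode, &map, 0);
}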
 713/* Maximum number of blocks we map for direct IO at once. */
 714#define DIO_MAX_BLOCKS 4096
 715
 716static int _ext4_get_block(struct inode *inode, sector_t iblock,
 717			   struct buffer_head *bh, int flags)
 718{
 719	handle_t *handle = ext4_journal_current_handle();
 720	struct ext4_map_blocks map;
 721	int ret = 0, started = 0;
 722	int dio_credits;
 723
 724	if (ext4_has_inline_data(inode))
 725		return -ERANGE;
 726
 727	map.m_lblk = iblock;
 728	map.m_len = bh->b_size >> inode->i_blkbits;
 729
 730	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
 731		/* Direct IO write... */
 732		if (map.m_len > DIO_MAX_BLOCKS)
 733			map.m_len = DIO_MAX_BLOCKS;
 734		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
 735		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
 736					    dio_credits);
 737		if (IS_ERR(handle)) {
 738			ret = PTR_ERR(handle);
 739			return ret;
 740		}
 741		started = 1;
 742	}
 743
 744	ret = ext4_map_blocks(handle, inode, &map, flags);
 745	if (ret > 0) {
 746		ext4_io_end_t *io_end = ext4_inode_aio(inode);
 747
 748		map_bh(bh, inode->i_sb, map.m_pblk);
 749		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
 750		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
 751			set_buffer_defer_completion(bh);
 752		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 753		ret = 0;
 754	}
 755	if (started)
 756		ext4_journal_stop(handle);
 757	return ret;
 758}
 759
 760int ext4_get_block(struct inode *inode, sector_t iblock,
 761		   struct buffer_head *bh, int create)
 762{
 763	return _ext4_get_block(inode, iblock, bh,
 764			       create ? EXT4_GET_BLOCKS_CREATE : 0);
 765}
 766
 767/*
 768 * `handle' can be NULL if create is zero
 769 */
 770struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 771				ext4_lblk_t block, int create, int *errp)
 772{
 773	struct ext4_map_blocks map;
 774	struct buffer_head *bh;
 775	int fatal = 0, err;
 776
 777	J_ASSERT(handle != NULL || create == 0);
 778
 779	map.m_lblk = block;
 780	map.m_len = 1;
 781	err = ext4_map_blocks(handle, inode, &map,
 782			      create ? EXT4_GET_BLOCKS_CREATE : 0);
 783
 784	/* ensure we send some value back into *errp */
 785	*errp = 0;
 786
 787	if (create && err == 0)
 788		err = -ENOSPC;	/* should never happen */
 789	if (err < 0)
 790		*errp = err;
 791	if (err <= 0)
 792		return NULL;
 793
 794	bh = sb_getblk(inode->i_sb, map.m_pblk);
 795	if (unlikely(!bh)) {
 796		*errp = -ENOMEM;
 797		return NULL;
 798	}
 799	if (map.m_flags & EXT4_MAP_NEW) {
 800		J_ASSERT(create != 0);
 801		J_ASSERT(handle != NULL);
 802
 803		/*
 804		 * Now that we do not always journal data, we should
 805		 * keep in mind whether this should always journal the
 806		 * new buffer as metadata.  For now, regular file
 807		 * writes use ext4_get_block instead, so it's not a
 808		 * problem.
 809		 */
 810		lock_buffer(bh);
 811		BUFFER_TRACE(bh, "call get_create_access");
 812		fatal = ext4_journal_get_create_access(handle, bh);
 813		if (!fatal && !buffer_uptodate(bh)) {
 814			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 815			set_buffer_uptodate(bh);
 816		}
 817		unlock_buffer(bh);
 818		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 819		err = ext4_handle_dirty_metadata(handle, inode, bh);
 820		if (!fatal)
 821			fatal = err;
 822	} else {
 823		BUFFER_TRACE(bh, "not a new buffer");
 824	}
 825	if (fatal) {
 826		*errp = fatal;
 827		brelse(bh);
 828		bh = NULL;
 829	}
 830	return bh;
 831}
 832
 833struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 834			       ext4_lblk_t block, int create, int *err)
 835{
 836	struct buffer_head *bh;
 837
 838	bh = ext4_getblk(handle, inode, block, create, err);
 839	if (!bh)
 840		return bh;
 841	if (buffer_uptodate(bh))
 842		return bh;
 843	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 844	wait_on_buffer(bh);
 845	if (buffer_uptodate(bh))
 846		return bh;
 847	put_bh(bh);
 848	*err = -EIO;
 849	return NULL;
 850}
 851
 852int ext4_walk_page_buffers(handle_t *handle,
 853			   struct buffer_head *head,
 854			   unsigned from,
 855			   unsigned to,
 856			   int *partial,
 857			   int (*fn)(handle_t *handle,
 858				     struct buffer_head *bh))
 859{
 860	struct buffer_head *bh;
 861	unsigned block_start, block_end;
 862	unsigned blocksize = head->b_size;
 863	int err, ret = 0;
 864	struct buffer_head *next;
 865
 866	for (bh = head, block_start = 0;
 867	     ret == 0 && (bh != head || !block_start);
 868	     block_start = block_end, bh = next) {
 869		next = bh->b_this_page;
 870		block_end = block_start + blocksize;
 871		if (block_end <= from || block_start >= to) {
 872			if (partial && !buffer_uptodate(bh))
 873				*partial = 1;
 874			continue;
 875		}
 876		err = (*fn)(handle, bh);
 877		if (!ret)
 878			ret = err;
 879	}
 880	return ret;
 881}
 882
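/*
 * Illustrative note, not part of the original source: the walk above
 * skips buffers lying entirely outside [from, to).  The test it uses is
 * the standard half-open interval overlap check, shown standalone:
 */
static inline int sketch_overlaps(unsigned block_start, unsigned block_end,
				  unsigned from, unsigned to)
{
	/* no overlap iff the buffer ends at/before from or starts at/after to */
	return !(block_end <= from || block_start >= to);
}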
 883/*
 884 * To preserve ordering, it is essential that the hole instantiation and
 885 * the data write be encapsulated in a single transaction.  We cannot
 886 * close off a transaction and start a new one between the ext4_get_block()
 887 * and the commit_write().  So doing the jbd2_journal_start at the start of
 888 * prepare_write() is the right place.
 889 *
 890 * Also, this function can nest inside ext4_writepage().  In that case, we
 891 * *know* that ext4_writepage() has generated enough buffer credits to do the
 892 * whole page.  So we won't block on the journal in that case, which is good,
 893 * because the caller may be PF_MEMALLOC.
 894 *
 895 * By accident, ext4 can be reentered when a transaction is open via
 896 * quota file writes.  If we were to commit the transaction while thus
 897 * reentered, there can be a deadlock - we would be holding a quota
 898 * lock, and the commit would never complete if another thread had a
 899 * transaction open and was blocking on the quota lock - a ranking
 900 * violation.
 901 *
 902 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 903 * will _not_ run commit under these circumstances because handle->h_ref
 904 * is elevated.  We'll still have enough credits for the tiny quotafile
 905 * write.
 906 */
 907int do_journal_get_write_access(handle_t *handle,
 908				struct buffer_head *bh)
 909{
 910	int dirty = buffer_dirty(bh);
 911	int ret;
 912
 913	if (!buffer_mapped(bh) || buffer_freed(bh))
 914		return 0;
 915	/*
 916	 * __block_write_begin() could have dirtied some buffers. Clean
 917	 * the dirty bit as jbd2_journal_get_write_access() could complain
 918	 * otherwise about fs integrity issues. Setting of the dirty bit
 919	 * by __block_write_begin() isn't a real problem here as we clear
 920	 * the bit before releasing a page lock and thus writeback cannot
 921	 * ever write the buffer.
 922	 */
 923	if (dirty)
 924		clear_buffer_dirty(bh);
 925	ret = ext4_journal_get_write_access(handle, bh);
 926	if (!ret && dirty)
 927		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
 928	return ret;
 929}
 930
 931static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
 932		   struct buffer_head *bh_result, int create);
 933static int ext4_write_begin(struct file *file, struct address_space *mapping,
 934			    loff_t pos, unsigned len, unsigned flags,
 935			    struct page **pagep, void **fsdata)
 936{
 937	struct inode *inode = mapping->host;
 938	int ret, needed_blocks;
 939	handle_t *handle;
 940	int retries = 0;
 941	struct page *page;
 942	pgoff_t index;
 943	unsigned from, to;
 944
 945	trace_ext4_write_begin(inode, pos, len, flags);
 946	/*
 947	 * Reserve one block more for addition to orphan list in case
 948	 * we allocate blocks but write fails for some reason
 949	 */
 950	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
 951	index = pos >> PAGE_CACHE_SHIFT;
 952	from = pos & (PAGE_CACHE_SIZE - 1);
 953	to = from + len;
 954
 955	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
 956		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
 957						    flags, pagep);
 958		if (ret < 0)
 959			return ret;
 960		if (ret == 1)
 961			return 0;
 962	}
 963
 964	/*
 965	 * grab_cache_page_write_begin() can take a long time if the
 966	 * system is thrashing due to memory pressure, or if the page
 967	 * is being written back.  So grab it first before we start
 968	 * the transaction handle.  This also allows us to allocate
 969	 * the page (if needed) without using GFP_NOFS.
 970	 */
 971retry_grab:
 972	page = grab_cache_page_write_begin(mapping, index, flags);
 973	if (!page)
 974		return -ENOMEM;
 975	unlock_page(page);
 976
 977retry_journal:
 978	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
 979	if (IS_ERR(handle)) {
 980		page_cache_release(page);
 981		return PTR_ERR(handle);
 982	}
 983
 984	lock_page(page);
 985	if (page->mapping != mapping) {
 986		/* The page got truncated from under us */
 987		unlock_page(page);
 988		page_cache_release(page);
 989		ext4_journal_stop(handle);
 990		goto retry_grab;
 991	}
 992	/* In case writeback began while the page was unlocked */
 993	wait_for_stable_page(page);
 994
 995	if (ext4_should_dioread_nolock(inode))
 996		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
 997	else
 998		ret = __block_write_begin(page, pos, len, ext4_get_block);
 999
1000	if (!ret && ext4_should_journal_data(inode)) {
1001		ret = ext4_walk_page_buffers(handle, page_buffers(page),
1002					     from, to, NULL,
1003					     do_journal_get_write_access);
1004	}
1005
1006	if (ret) {
1007		unlock_page(page);
1008		/*
1009		 * __block_write_begin may have instantiated a few blocks
1010		 * outside i_size.  Trim these off again. Don't need
1011		 * i_size_read because we hold i_mutex.
1012		 *
1013		 * Add inode to orphan list in case we crash before
1014		 * truncate finishes
1015		 */
1016		if (pos + len > inode->i_size && ext4_can_truncate(inode))
1017			ext4_orphan_add(handle, inode);
1018
1019		ext4_journal_stop(handle);
1020		if (pos + len > inode->i_size) {
1021			ext4_truncate_failed_write(inode);
1022			/*
1023			 * If truncate failed early the inode might
1024			 * still be on the orphan list; we need to
1025			 * make sure the inode is removed from the
1026			 * orphan list in that case.
1027			 */
1028			if (inode->i_nlink)
1029				ext4_orphan_del(NULL, inode);
1030		}
1031
1032		if (ret == -ENOSPC &&
1033		    ext4_should_retry_alloc(inode->i_sb, &retries))
1034			goto retry_journal;
1035		page_cache_release(page);
1036		return ret;
1037	}
1038	*pagep = page;
1039	return ret;
1040}
1041
1042/* For write_end() in data=journal mode */
1043static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1044{
1045	int ret;
1046	if (!buffer_mapped(bh) || buffer_freed(bh))
1047		return 0;
1048	set_buffer_uptodate(bh);
1049	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1050	clear_buffer_meta(bh);
1051	clear_buffer_prio(bh);
1052	return ret;
1053}
1054
1055/*
1056 * We need to pick up the new inode size which generic_commit_write gave us.
1057 * `file' can be NULL - eg, when called from page_symlink().
1058 *
1059 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1060 * buffers are managed internally.
1061 */
1062static int ext4_write_end(struct file *file,
1063			  struct address_space *mapping,
1064			  loff_t pos, unsigned len, unsigned copied,
1065			  struct page *page, void *fsdata)
1066{
1067	handle_t *handle = ext4_journal_current_handle();
1068	struct inode *inode = mapping->host;
1069	int ret = 0, ret2;
1070	int i_size_changed = 0;
1071
1072	trace_ext4_write_end(inode, pos, len, copied);
1073	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
1074		ret = ext4_jbd2_file_inode(handle, inode);
1075		if (ret) {
1076			unlock_page(page);
1077			page_cache_release(page);
1078			goto errout;
1079		}
1080	}
1081
1082	if (ext4_has_inline_data(inode)) {
1083		ret = ext4_write_inline_data_end(inode, pos, len,
1084						 copied, page);
1085		if (ret < 0)
1086			goto errout;
1087		copied = ret;
1088	} else
1089		copied = block_write_end(file, mapping, pos,
1090					 len, copied, page, fsdata);
1091
1092	/*
1093	 * No need to use i_size_read() here, the i_size
1094	 * cannot change under us because we hold i_mutex.
1095	 *
1096	 * But it's important to update i_size while still holding page lock:
1097	 * page writeout could otherwise come in and zero beyond i_size.
1098	 */
1099	if (pos + copied > inode->i_size) {
1100		i_size_write(inode, pos + copied);
1101		i_size_changed = 1;
1102	}
1103
1104	if (pos + copied > EXT4_I(inode)->i_disksize) {
1105		/* We need to mark inode dirty even if
1106		 * new_i_size is less than inode->i_size
1107		 * but greater than i_disksize. (hint delalloc)
1108		 */
1109		ext4_update_i_disksize(inode, (pos + copied));
1110		i_size_changed = 1;
1111	}
1112	unlock_page(page);
1113	page_cache_release(page);
1114
1115	/*
1116	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1117	 * makes the holding time of page lock longer. Second, it forces lock
1118	 * ordering of page lock and transaction start for journaling
1119	 * filesystems.
1120	 */
1121	if (i_size_changed)
1122		ext4_mark_inode_dirty(handle, inode);
1123
1124	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1125		/* If we have allocated more blocks but copied
1126		 * less, we will have blocks allocated outside
1127		 * inode->i_size, so truncate them.
1128		 */
1129		ext4_orphan_add(handle, inode);
1130errout:
1131	ret2 = ext4_journal_stop(handle);
1132	if (!ret)
1133		ret = ret2;
1134
1135	if (pos + len > inode->i_size) {
1136		ext4_truncate_failed_write(inode);
1137		/*
1138		 * If truncate failed early the inode might still be
1139		 * on the orphan list; we need to make sure the inode
1140		 * is removed from the orphan list in that case.
1141		 */
1142		if (inode->i_nlink)
1143			ext4_orphan_del(NULL, inode);
1144	}
1145
1146	return ret ? ret : copied;
1147}
1148
1149static int ext4_journalled_write_end(struct file *file,
1150				     struct address_space *mapping,
1151				     loff_t pos, unsigned len, unsigned copied,
1152				     struct page *page, void *fsdata)
1153{
1154	handle_t *handle = ext4_journal_current_handle();
1155	struct inode *inode = mapping->host;
1156	int ret = 0, ret2;
1157	int partial = 0;
1158	unsigned from, to;
1159	loff_t new_i_size;
1160
1161	trace_ext4_journalled_write_end(inode, pos, len, copied);
1162	from = pos & (PAGE_CACHE_SIZE - 1);
1163	to = from + len;
1164
1165	BUG_ON(!ext4_handle_valid(handle));
1166
1167	if (ext4_has_inline_data(inode))
1168		copied = ext4_write_inline_data_end(inode, pos, len,
1169						    copied, page);
1170	else {
1171		if (copied < len) {
1172			if (!PageUptodate(page))
1173				copied = 0;
1174			page_zero_new_buffers(page, from+copied, to);
1175		}
1176
1177		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1178					     to, &partial, write_end_fn);
1179		if (!partial)
1180			SetPageUptodate(page);
1181	}
1182	new_i_size = pos + copied;
1183	if (new_i_size > inode->i_size)
1184		i_size_write(inode, pos+copied);
1185	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1186	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1187	if (new_i_size > EXT4_I(inode)->i_disksize) {
1188		ext4_update_i_disksize(inode, new_i_size);
1189		ret2 = ext4_mark_inode_dirty(handle, inode);
1190		if (!ret)
1191			ret = ret2;
1192	}
1193
1194	unlock_page(page);
1195	page_cache_release(page);
1196	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1197		/* If we have allocated more blocks but copied
1198		 * less, we will have blocks allocated outside
1199		 * inode->i_size, so truncate them.
1200		 */
1201		ext4_orphan_add(handle, inode);
1202
1203	ret2 = ext4_journal_stop(handle);
1204	if (!ret)
1205		ret = ret2;
1206	if (pos + len > inode->i_size) {
1207		ext4_truncate_failed_write(inode);
1208		/*
1209		 * If truncate failed early the inode might still be
1210		 * on the orphan list; we need to make sure the inode
1211		 * is removed from the orphan list in that case.
1212		 */
1213		if (inode->i_nlink)
1214			ext4_orphan_del(NULL, inode);
1215	}
1216
1217	return ret ? ret : copied;
1218}
1219
1220/*
1221 * Reserve metadata for a single block located at lblock
1222 */
1223static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1224{
1225	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1226	struct ext4_inode_info *ei = EXT4_I(inode);
1227	unsigned int md_needed;
1228	ext4_lblk_t save_last_lblock;
1229	int save_len;
1230
1231	/*
1232	 * Recalculate the number of metadata blocks to reserve
1233	 * in order to allocate nrblocks;
1234	 * the worst case is one extent per block.
1235	 */
1236	spin_lock(&ei->i_block_reservation_lock);
1237	/*
1238	 * ext4_calc_metadata_amount() has side effects, which we have
1239	 * to be prepared to undo if we fail to claim space.
1240	 */
1241	save_len = ei->i_da_metadata_calc_len;
1242	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1243	md_needed = EXT4_NUM_B2C(sbi,
1244				 ext4_calc_metadata_amount(inode, lblock));
1245	trace_ext4_da_reserve_space(inode, md_needed);
1246
1247	/*
1248	 * We do still charge estimated metadata to the sb though;
1249	 * we cannot afford to run out of free blocks.
1250	 */
1251	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
1252		ei->i_da_metadata_calc_len = save_len;
1253		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1254		spin_unlock(&ei->i_block_reservation_lock);
1255		return -ENOSPC;
1256	}
1257	ei->i_reserved_meta_blocks += md_needed;
1258	spin_unlock(&ei->i_block_reservation_lock);
1259
1260	return 0;       /* success */
1261}
1262
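/*
 * Illustrative pattern sketch, not part of the original source: because
 * ext4_calc_metadata_amount() mutates the i_da_metadata_calc_* fields,
 * the function above snapshots them first and restores them if the
 * claim fails.  The generic shape of that snapshot/rollback pattern,
 * with a hypothetical claim callback returning nonzero on failure:
 */
static int sketch_claim_or_rollback(unsigned int *state,
				    int (*try_claim)(unsigned int))
{
	unsigned int saved = *state;	/* snapshot before the side effect */

	*state += 1;			/* stand-in for the side effect */
	if (try_claim(*state)) {
		*state = saved;		/* claim failed: undo the side effect */
		return -ENOSPC;
	}
	return 0;
}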
1263/*
1264 * Reserve a single cluster located at lblock
1265 */
1266static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1267{
1268	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1269	struct ext4_inode_info *ei = EXT4_I(inode);
1270	unsigned int md_needed;
1271	int ret;
1272	ext4_lblk_t save_last_lblock;
1273	int save_len;
1274
1275	/*
1276	 * We will charge metadata quota at writeout time; this saves
1277	 * us from metadata over-estimation, though we may go over by
1278	 * a small amount in the end.  Here we just reserve for data.
1279	 */
1280	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1281	if (ret)
1282		return ret;
1283
1284	/*
1285	 * Recalculate the number of metadata blocks to reserve
1286	 * in order to allocate nrblocks;
1287	 * the worst case is one extent per block.
1288	 */
1289	spin_lock(&ei->i_block_reservation_lock);
1290	/*
1291	 * ext4_calc_metadata_amount() has side effects, which we have
1292	 * to be prepared to undo if we fail to claim space.
1293	 */
1294	save_len = ei->i_da_metadata_calc_len;
1295	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1296	md_needed = EXT4_NUM_B2C(sbi,
1297				 ext4_calc_metadata_amount(inode, lblock));
1298	trace_ext4_da_reserve_space(inode, md_needed);
1299
1300	/*
1301	 * We do still charge estimated metadata to the sb though;
1302	 * we cannot afford to run out of free blocks.
1303	 */
1304	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
1305		ei->i_da_metadata_calc_len = save_len;
1306		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1307		spin_unlock(&ei->i_block_reservation_lock);
1308		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1309		return -ENOSPC;
1310	}
1311	ei->i_reserved_data_blocks++;
1312	ei->i_reserved_meta_blocks += md_needed;
1313	spin_unlock(&ei->i_block_reservation_lock);
1314
1315	return 0;       /* success */
1316}
1317
1318static void ext4_da_release_space(struct inode *inode, int to_free)
1319{
1320	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1321	struct ext4_inode_info *ei = EXT4_I(inode);
1322
1323	if (!to_free)
1324		return;		/* Nothing to release, exit */
1325
1326	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1327
1328	trace_ext4_da_release_space(inode, to_free);
1329	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1330		/*
1331		 * if there aren't enough reserved blocks, then the
1332		 * counter is messed up somewhere.  Since this
1333		 * function is called from invalidate page, it's
1334		 * harmless to return without any action.
1335		 */
1336		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1337			 "ino %lu, to_free %d with only %d reserved "
1338			 "data blocks", inode->i_ino, to_free,
1339			 ei->i_reserved_data_blocks);
1340		WARN_ON(1);
1341		to_free = ei->i_reserved_data_blocks;
1342	}
1343	ei->i_reserved_data_blocks -= to_free;
1344
1345	if (ei->i_reserved_data_blocks == 0) {
1346		/*
1347		 * We can release all of the reserved metadata blocks
1348		 * only when we have written all of the delayed
1349		 * allocation blocks.
1350		 * Note that in case of bigalloc, i_reserved_meta_blocks,
1351		 * i_reserved_data_blocks, etc. refer to number of clusters.
1352		 */
1353		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
1354				   ei->i_reserved_meta_blocks);
1355		ei->i_reserved_meta_blocks = 0;
1356		ei->i_da_metadata_calc_len = 0;
1357	}
1358
1359	/* update fs dirty data blocks counter */
1360	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1361
1362	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1363
1364	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1365}
1366
1367static void ext4_da_page_release_reservation(struct page *page,
1368					     unsigned int offset,
1369					     unsigned int length)
1370{
1371	int to_release = 0;
1372	struct buffer_head *head, *bh;
1373	unsigned int curr_off = 0;
1374	struct inode *inode = page->mapping->host;
1375	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1376	unsigned int stop = offset + length;
1377	int num_clusters;
1378	ext4_fsblk_t lblk;
1379
1380	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
1381
1382	head = page_buffers(page);
1383	bh = head;
1384	do {
1385		unsigned int next_off = curr_off + bh->b_size;
1386
1387		if (next_off > stop)
1388			break;
1389
1390		if ((offset <= curr_off) && (buffer_delay(bh))) {
1391			to_release++;
1392			clear_buffer_delay(bh);
1393		}
1394		curr_off = next_off;
1395	} while ((bh = bh->b_this_page) != head);
1396
1397	if (to_release) {
1398		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1399		ext4_es_remove_extent(inode, lblk, to_release);
1400	}
1401
1402	/* If we have released all the blocks belonging to a cluster, then we
1403	 * need to release the reserved space for that cluster. */
1404	num_clusters = EXT4_NUM_B2C(sbi, to_release);
1405	while (num_clusters > 0) {
1406		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
1407			((num_clusters - 1) << sbi->s_cluster_bits);
1408		if (sbi->s_cluster_ratio == 1 ||
1409		    !ext4_find_delalloc_cluster(inode, lblk))
1410			ext4_da_release_space(inode, 1);
1411
1412		num_clusters--;
1413	}
1414}
1415
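/*
 * Illustrative note, not part of the original source: with bigalloc,
 * reservations are tracked in clusters, and the EXT4_NUM_B2C() use above
 * is a round-up conversion from blocks to clusters.  Modeled here with
 * 2^cluster_bits blocks per cluster:
 */
static inline unsigned int sketch_blocks_to_clusters(unsigned int blocks,
						     unsigned int cluster_bits)
{
	/* e.g. 5 blocks at 4 blocks/cluster (cluster_bits == 2) -> 2 */
	return (blocks + (1U << cluster_bits) - 1) >> cluster_bits;
}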
1416/*
1417 * Delayed allocation stuff
1418 */
1419
1420struct mpage_da_data {
1421	struct inode *inode;
1422	struct writeback_control *wbc;
1423
1424	pgoff_t first_page;	/* The first page to write */
1425	pgoff_t next_page;	/* Current page to examine */
1426	pgoff_t last_page;	/* Last page to examine */
1427	/*
1428	 * Extent to map - this can be after first_page because that can be
1429	 * fully mapped. We somewhat abuse m_flags to store whether the extent
1430	 * is delalloc or unwritten.
1431	 */
1432	struct ext4_map_blocks map;
1433	struct ext4_io_submit io_submit;	/* IO submission data */
1434};
1435
1436static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1437				       bool invalidate)
1438{
1439	int nr_pages, i;
1440	pgoff_t index, end;
1441	struct pagevec pvec;
1442	struct inode *inode = mpd->inode;
1443	struct address_space *mapping = inode->i_mapping;
1444
1445	/* This is necessary when next_page == 0. */
1446	if (mpd->first_page >= mpd->next_page)
1447		return;
1448
1449	index = mpd->first_page;
1450	end   = mpd->next_page - 1;
1451	if (invalidate) {
1452		ext4_lblk_t start, last;
1453		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1454		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1455		ext4_es_remove_extent(inode, start, last - start + 1);
1456	}
1457
1458	pagevec_init(&pvec, 0);
1459	while (index <= end) {
1460		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1461		if (nr_pages == 0)
1462			break;
1463		for (i = 0; i < nr_pages; i++) {
1464			struct page *page = pvec.pages[i];
1465			if (page->index > end)
1466				break;
1467			BUG_ON(!PageLocked(page));
1468			BUG_ON(PageWriteback(page));
1469			if (invalidate) {
1470				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
1471				ClearPageUptodate(page);
1472			}
1473			unlock_page(page);
1474		}
1475		index = pvec.pages[nr_pages - 1]->index + 1;
1476		pagevec_release(&pvec);
1477	}
1478}
1479
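/*
 * Illustrative note, not part of the original source: the extent-status
 * range above converts page indexes to logical blocks with
 * "index << (PAGE_CACHE_SHIFT - inode->i_blkbits)".  With 4K pages and
 * 1K blocks that shift is 2, i.e. four blocks per page:
 */
static inline ext4_lblk_t sketch_page_to_lblk(pgoff_t index,
					      unsigned int blkbits)
{
	return index << (PAGE_CACHE_SHIFT - blkbits);
}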
1480static void ext4_print_free_blocks(struct inode *inode)
1481{
1482	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1483	struct super_block *sb = inode->i_sb;
1484	struct ext4_inode_info *ei = EXT4_I(inode);
1485
1486	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1487	       EXT4_C2B(EXT4_SB(inode->i_sb),
1488			ext4_count_free_clusters(sb)));
1489	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1490	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1491	       (long long) EXT4_C2B(EXT4_SB(sb),
1492		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1493	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1494	       (long long) EXT4_C2B(EXT4_SB(sb),
1495		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1496	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1497	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1498		 ei->i_reserved_data_blocks);
1499	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1500	       ei->i_reserved_meta_blocks);
1501	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
1502	       ei->i_allocated_meta_blocks);
1503	return;
1504}
1505
1506static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1507{
1508	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1509}
1510
1511/*
1512 * This function grabs code from the very beginning of
1513 * ext4_map_blocks, but assumes that the caller is from delayed write
1514 * time. This function looks up the requested blocks and sets the
1515 * buffer delay bit under the protection of i_data_sem.
1516 */
1517static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1518			      struct ext4_map_blocks *map,
1519			      struct buffer_head *bh)
1520{
1521	struct extent_status es;
1522	int retval;
1523	sector_t invalid_block = ~((sector_t) 0xffff);
1524#ifdef ES_AGGRESSIVE_TEST
1525	struct ext4_map_blocks orig_map;
1526
1527	memcpy(&orig_map, map, sizeof(*map));
1528#endif
1529
1530	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1531		invalid_block = ~0;
1532
1533	map->m_flags = 0;
1534	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1535		  "logical block %lu\n", inode->i_ino, map->m_len,
1536		  (unsigned long) map->m_lblk);
1537
1538	/* Lookup extent status tree firstly */
1539	if (ext4_es_lookup_extent(inode, iblock, &es)) {
1540		ext4_es_lru_add(inode);
1541		if (ext4_es_is_hole(&es)) {
1542			retval = 0;
1543			down_read((&EXT4_I(inode)->i_data_sem));
1544			goto add_delayed;
1545		}
1546
1547		/*
1548		 * Delayed extent could be allocated by fallocate.
1549		 * So we need to check it.
1550		 */
1551		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1552			map_bh(bh, inode->i_sb, invalid_block);
1553			set_buffer_new(bh);
1554			set_buffer_delay(bh);
1555			return 0;
1556		}
1557
1558		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1559		retval = es.es_len - (iblock - es.es_lblk);
1560		if (retval > map->m_len)
1561			retval = map->m_len;
1562		map->m_len = retval;
1563		if (ext4_es_is_written(&es))
1564			map->m_flags |= EXT4_MAP_MAPPED;
1565		else if (ext4_es_is_unwritten(&es))
1566			map->m_flags |= EXT4_MAP_UNWRITTEN;
1567		else
1568			BUG_ON(1);
1569
1570#ifdef ES_AGGRESSIVE_TEST
1571		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1572#endif
1573		return retval;
1574	}
1575
1576	/*
1577	 * Try to see if we can get the block without requesting a new
1578	 * file system block.
1579	 */
1580	down_read((&EXT4_I(inode)->i_data_sem));
1581	if (ext4_has_inline_data(inode)) {
1582		/*
1583		 * We will soon create blocks for this page, so let
1584		 * us pretend that the blocks aren't allocated yet.
1585		 * In case of clusters, we have to handle the work
1586		 * of mapping from cluster so that the reserved space
1587		 * is calculated properly.
1588		 */
1589		if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
1590		    ext4_find_delalloc_cluster(inode, map->m_lblk))
1591			map->m_flags |= EXT4_MAP_FROM_CLUSTER;
1592		retval = 0;
1593	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1594		retval = ext4_ext_map_blocks(NULL, inode, map,
1595					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
1596	else
1597		retval = ext4_ind_map_blocks(NULL, inode, map,
1598					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
1599
1600add_delayed:
1601	if (retval == 0) {
1602		int ret;
1603		/*
1604		 * XXX: __block_prepare_write() unmaps passed block,
1605		 * is it OK?
1606		 */
1607		/*
1608		 * If the block was allocated from a previously allocated cluster,
1609		 * then we don't need to reserve it again. However we still need
1610		 * to reserve metadata for every block we're going to write.
1611		 */
1612		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1613			ret = ext4_da_reserve_space(inode, iblock);
1614			if (ret) {
1615				/* not enough space to reserve */
1616				retval = ret;
1617				goto out_unlock;
1618			}
1619		} else {
1620			ret = ext4_da_reserve_metadata(inode, iblock);
1621			if (ret) {
1622				/* not enough space to reserve */
1623				retval = ret;
1624				goto out_unlock;
1625			}
1626		}
1627
1628		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1629					    ~0, EXTENT_STATUS_DELAYED);
1630		if (ret) {
1631			retval = ret;
1632			goto out_unlock;
1633		}
1634
1635		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1636		 * and it should not appear on the bh->b_state.
1637		 */
1638		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1639
1640		map_bh(bh, inode->i_sb, invalid_block);
1641		set_buffer_new(bh);
1642		set_buffer_delay(bh);
1643	} else if (retval > 0) {
1644		int ret;
1645		unsigned int status;
1646
1647		if (unlikely(retval != map->m_len)) {
1648			ext4_warning(inode->i_sb,
1649				     "ES len assertion failed for inode "
1650				     "%lu: retval %d != map->m_len %d",
1651				     inode->i_ino, retval, map->m_len);
1652			WARN_ON(1);
1653		}
1654
1655		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1656				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1657		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1658					    map->m_pblk, status);
1659		if (ret != 0)
1660			retval = ret;
1661	}
1662
1663out_unlock:
1664	up_read((&EXT4_I(inode)->i_data_sem));
1665
1666	return retval;
1667}
1668
1669/*
1670 * This is a special get_blocks_t callback which is used by
1671 * ext4_da_write_begin().  It will either return a mapped block or
1672 * reserve space for a single block.
1673 *
1674 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1675 * We also have b_blocknr = -1 and b_bdev initialized properly
1676 *
1677 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1678 * We also have b_blocknr set to the physical block backing the unwritten
1679 * extent, and b_bdev initialized properly.
1680 */
1681int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1682			   struct buffer_head *bh, int create)
1683{
1684	struct ext4_map_blocks map;
1685	int ret = 0;
1686
1687	BUG_ON(create == 0);
1688	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1689
1690	map.m_lblk = iblock;
1691	map.m_len = 1;
1692
1693	/*
1694	 * First, we need to know whether the block is already allocated;
1695	 * preallocated blocks are unmapped but should be treated
1696	 * the same as allocated blocks.
1697	 */
1698	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1699	if (ret <= 0)
1700		return ret;
1701
1702	map_bh(bh, inode->i_sb, map.m_pblk);
1703	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1704
1705	if (buffer_unwritten(bh)) {
1706		/* A delayed write to unwritten bh should be marked
1707		 * new and mapped.  Mapped ensures that we don't do
1708		 * get_block multiple times when we write to the same
1709		 * offset and new ensures that we do proper zero out
1710		 * for partial write.
1711		 */
1712		set_buffer_new(bh);
1713		set_buffer_mapped(bh);
1714	}
1715	return 0;
1716}
1717
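/*
 * Illustrative hookup sketch, not part of the original source: the
 * delalloc write path hands the callback above to __block_write_begin(),
 * which calls it once per buffer under the page, mirroring what
 * ext4_da_write_begin() does.
 */
static int sketch_da_prepare_page(struct page *page, loff_t pos, unsigned len)
{
	/* each unmapped buffer gets a delayed reservation via the callback */
	return __block_write_begin(page, pos, len, ext4_da_get_block_prep);
}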
1718static int bget_one(handle_t *handle, struct buffer_head *bh)
1719{
1720	get_bh(bh);
1721	return 0;
1722}
1723
1724static int bput_one(handle_t *handle, struct buffer_head *bh)
1725{
1726	put_bh(bh);
1727	return 0;
1728}
1729
1730static int __ext4_journalled_writepage(struct page *page,
1731				       unsigned int len)
1732{
1733	struct address_space *mapping = page->mapping;
1734	struct inode *inode = mapping->host;
1735	struct buffer_head *page_bufs = NULL;
1736	handle_t *handle = NULL;
1737	int ret = 0, err = 0;
1738	int inline_data = ext4_has_inline_data(inode);
1739	struct buffer_head *inode_bh = NULL;
1740
1741	ClearPageChecked(page);
1742
1743	if (inline_data) {
1744		BUG_ON(page->index != 0);
1745		BUG_ON(len > ext4_get_max_inline_size(inode));
1746		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1747		if (inode_bh == NULL)
1748			goto out;
1749	} else {
1750		page_bufs = page_buffers(page);
1751		if (!page_bufs) {
1752			BUG();
1753			goto out;
1754		}
1755		ext4_walk_page_buffers(handle, page_bufs, 0, len,
1756				       NULL, bget_one);
1757	}
1758	/* As soon as we unlock the page, it can go away, but we have
1759	 * references to buffers so we are safe */
1760	unlock_page(page);
1761
1762	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1763				    ext4_writepage_trans_blocks(inode));
1764	if (IS_ERR(handle)) {
1765		ret = PTR_ERR(handle);
1766		goto out;
1767	}
1768
1769	BUG_ON(!ext4_handle_valid(handle));
1770
1771	if (inline_data) {
1772		ret = ext4_journal_get_write_access(handle, inode_bh);
1773
1774		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
1775
1776	} else {
1777		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1778					     do_journal_get_write_access);
1779
1780		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1781					     write_end_fn);
1782	}
1783	if (ret == 0)
1784		ret = err;
1785	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1786	err = ext4_journal_stop(handle);
1787	if (!ret)
1788		ret = err;
1789
1790	if (!ext4_has_inline_data(inode))
1791		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
1792				       NULL, bput_one);
1793	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1794out:
1795	brelse(inode_bh);
1796	return ret;
1797}
1798
1799/*
1800 * Note that we don't need to start a transaction unless we're journaling data
1801 * because we should have holes filled from ext4_page_mkwrite(). We don't even
1802 * need to add the inode to the transaction's list in ordered mode, because if
1803 * we are writing back data added by write(), the inode is already there and if
1804 * we are writing back data modified via mmap(), no one guarantees in which
1805 * transaction the data will hit the disk. In case we are journaling data, we
1806 * cannot start transaction directly because transaction start ranks above page
1807 * lock so we have to do some magic.
1808 *
1809 * This function can get called via...
1810 *   - ext4_writepages after taking page lock (have journal handle)
1811 *   - journal_submit_inode_data_buffers (no journal handle)
1812 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1813 *   - grab_page_cache when doing write_begin (have journal handle)
1814 *
1815 * We don't do any block allocation in this function. If we have a page with
1816 * multiple blocks, we need to write those buffer_heads that are mapped. This
1817 * is important for mmap-based writes. So if, with a blocksize of 1K, we do
1818 * truncate(f, 1024);
1819 * a = mmap(f, 0, 4096);
1820 * a[0] = 'a';
1821 * truncate(f, 4096);
1822 * then the page has its first buffer_head mapped via the page_mkwrite
1823 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
1824 * do_wp_page). So writepage should write the first block. If we modify
1825 * the mmap area beyond 1024 we will again get a page_fault and the
1826 * page_mkwrite callback will do the block allocation and mark the
1827 * buffer_heads mapped.
1828 *
1829 * We redirty the page if it has any buffer_heads that are either delayed
1830 * or unwritten.
1831 *
1832 * We can get recursively called as shown below.
1833 *
1834 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1835 *		ext4_writepage()
1836 *
1837 * But since we don't do any block allocation we should not deadlock.
1838 * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
1839 */
1840static int ext4_writepage(struct page *page,
1841			  struct writeback_control *wbc)
1842{
1843	int ret = 0;
1844	loff_t size;
1845	unsigned int len;
1846	struct buffer_head *page_bufs = NULL;
1847	struct inode *inode = page->mapping->host;
1848	struct ext4_io_submit io_submit;
1849
1850	trace_ext4_writepage(page);
1851	size = i_size_read(inode);
1852	if (page->index == size >> PAGE_CACHE_SHIFT)
1853		len = size & ~PAGE_CACHE_MASK;
1854	else
1855		len = PAGE_CACHE_SIZE;
1856
1857	page_bufs = page_buffers(page);
1858	/*
1859	 * We cannot do block allocation or other extent handling in this
1860	 * function. If there are buffers needing that, we have to redirty
1861	 * the page. But we may reach here when we do a journal commit via
1862	 * journal_submit_inode_data_buffers() and in that case we must write
1863	 * allocated buffers to achieve data=ordered mode guarantees.
1864	 */
1865	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
1866				   ext4_bh_delay_or_unwritten)) {
1867		redirty_page_for_writepage(wbc, page);
1868		if (current->flags & PF_MEMALLOC) {
1869			/*
1870			 * For memory cleaning there's no point in writing only
1871			 * some buffers. So just bail out. Warn if we came here
1872			 * from direct reclaim.
1873			 */
1874			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
1875							== PF_MEMALLOC);
1876			unlock_page(page);
1877			return 0;
1878		}
1879	}
1880
1881	if (PageChecked(page) && ext4_should_journal_data(inode))
1882		/*
1883		 * It's mmapped pagecache.  Add buffers and journal it.  There
1884		 * doesn't seem much point in redirtying the page here.
1885		 */
1886		return __ext4_journalled_writepage(page, len);
1887
1888	ext4_io_submit_init(&io_submit, wbc);
1889	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
1890	if (!io_submit.io_end) {
1891		redirty_page_for_writepage(wbc, page);
1892		unlock_page(page);
1893		return -ENOMEM;
1894	}
1895	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
1896	ext4_io_submit(&io_submit);
1897	/* Drop io_end reference we got from init */
1898	ext4_put_io_end_defer(io_submit.io_end);
1899	return ret;
1900}
1901
1902static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
1903{
1904	int len;
1905	loff_t size = i_size_read(mpd->inode);
1906	int err;
1907
1908	BUG_ON(page->index != mpd->first_page);
1909	if (page->index == size >> PAGE_CACHE_SHIFT)
1910		len = size & ~PAGE_CACHE_MASK;
1911	else
1912		len = PAGE_CACHE_SIZE;
1913	clear_page_dirty_for_io(page);
1914	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
1915	if (!err)
1916		mpd->wbc->nr_to_write--;
1917	mpd->first_page++;
1918
1919	return err;
1920}
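/*
 * Worked example of the partial-page length computation used above and in
 * ext4_writepage(), assuming 4K pages (PAGE_CACHE_SIZE = 4096): for
 * i_size = 10000 the last page has index 10000 >> 12 = 2, and only
 * len = 10000 & 4095 = 1808 bytes of it are written; every earlier page
 * gets the full len = 4096.
 */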
1921
1922#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
1923
1924/*
1925 * mballoc gives us at most this number of blocks...
1926 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1927 * The rest of mballoc seems to handle chunks up to full group size.
1928 */
1929#define MAX_WRITEPAGES_EXTENT_LEN 2048
1930
1931/*
1932 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1933 *
1934 * @mpd - extent of blocks
1935 * @lblk - logical number of the block in the file
1936 * @bh - buffer head we want to add to the extent
1937 *
1938 * The function is used to collect contiguous blocks in the same state. If the
1939 * buffer doesn't require mapping for writeback and we haven't started the
1940 * extent of buffers to map yet, the function returns 'true' immediately - the
1941 * caller can write the buffer right away. Otherwise the function returns true
1942 * if the block has been added to the extent, false if the block couldn't be
1943 * added.
1944 */
1945static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1946				   struct buffer_head *bh)
1947{
1948	struct ext4_map_blocks *map = &mpd->map;
1949
1950	/* Buffer that doesn't need mapping for writeback? */
1951	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
1952	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1953		/* So far no extent to map => we write the buffer right away */
1954		if (map->m_len == 0)
1955			return true;
1956		return false;
1957	}
1958
1959	/* First block in the extent? */
1960	if (map->m_len == 0) {
1961		map->m_lblk = lblk;
1962		map->m_len = 1;
1963		map->m_flags = bh->b_state & BH_FLAGS;
1964		return true;
1965	}
1966
1967	/* Don't go larger than mballoc is willing to allocate */
1968	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1969		return false;
1970
1971	/* Can we merge the block to our big extent? */
1972	if (lblk == map->m_lblk + map->m_len &&
1973	    (bh->b_state & BH_FLAGS) == map->m_flags) {
1974		map->m_len++;
1975		return true;
1976	}
1977	return false;
1978}
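/*
 * Example of the merge rules above, assuming a 1K blocksize (four blocks
 * per page): with map->m_lblk = 20 and map->m_len = 2, a delayed buffer at
 * lblk 22 whose (b_state & BH_FLAGS) equals map->m_flags extends the
 * extent to m_len = 3, while a buffer at lblk 23 that is unwritten rather
 * than delayed fails the m_flags comparison and terminates the extent.
 */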
1979
1980/*
1981 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1982 *
1983 * @mpd - extent of blocks for mapping
1984 * @head - the first buffer in the page
1985 * @bh - buffer we should start processing from
1986 * @lblk - logical number of the block in the file corresponding to @bh
1987 *
1988 * Walk through page buffers from @bh up to @head (exclusive) and either submit
1989 * the page for IO if all buffers in this page were mapped and there's no
1990 * accumulated extent of buffers to map or add buffers in the page to the
1991 * extent of buffers to map. The function returns 1 if the caller can continue
1992 * by processing the next page, 0 if it should stop adding buffers to the
1993 * extent to map because we cannot extend it anymore. It can also return a
1994 * value < 0 in case of an error during IO submission.
1995 */
1996static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1997				   struct buffer_head *head,
1998				   struct buffer_head *bh,
1999				   ext4_lblk_t lblk)
2000{
2001	struct inode *inode = mpd->inode;
2002	int err;
2003	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
2004							>> inode->i_blkbits;
2005
2006	do {
2007		BUG_ON(buffer_locked(bh));
2008
2009		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2010			/* Found extent to map? */
2011			if (mpd->map.m_len)
2012				return 0;
2013			/* Everything mapped so far and we hit EOF */
2014			break;
2015		}
2016	} while (lblk++, (bh = bh->b_this_page) != head);
2017	/* So far everything mapped? Submit the page for IO. */
2018	if (mpd->map.m_len == 0) {
2019		err = mpage_submit_page(mpd, head->b_page);
2020		if (err < 0)
2021			return err;
2022	}
2023	return lblk < blocks;
2024}
2025
2026/*
2027 * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and
2028 *		       submit fully mapped pages for IO
2029 *
2030 * @mpd - description of extent to map, on return next extent to map
2031 *
2032 * Scan buffers corresponding to changed extent (we expect corresponding pages
2033 * to be already locked) and update buffer state according to new extent state.
2034 * We map delalloc buffers to their physical location, clear unwritten bits,
2035 * and mark buffers as uninit when we perform writes to uninitialized extents
2036 * and do extent conversion after IO is finished. If the last page is not fully
2037 * mapped, we update @map to the next extent in the last page that needs
2038 * mapping. Otherwise we submit the page for IO.
2039 */
2040static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2041{
2042	struct pagevec pvec;
2043	int nr_pages, i;
2044	struct inode *inode = mpd->inode;
2045	struct buffer_head *head, *bh;
2046	int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
2047	pgoff_t start, end;
2048	ext4_lblk_t lblk;
2049	sector_t pblock;
2050	int err;
2051
2052	start = mpd->map.m_lblk >> bpp_bits;
2053	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2054	lblk = start << bpp_bits;
2055	pblock = mpd->map.m_pblk;
2056
2057	pagevec_init(&pvec, 0);
2058	while (start <= end) {
2059		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
2060					  PAGEVEC_SIZE);
2061		if (nr_pages == 0)
2062			break;
2063		for (i = 0; i < nr_pages; i++) {
2064			struct page *page = pvec.pages[i];
2065
2066			if (page->index > end)
2067				break;
2068			/* Up to 'end' pages must be contiguous */
2069			BUG_ON(page->index != start);
2070			bh = head = page_buffers(page);
2071			do {
2072				if (lblk < mpd->map.m_lblk)
2073					continue;
2074				if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2075					/*
2076					 * Buffer after end of mapped extent.
2077					 * Find next buffer in the page to map.
2078					 */
2079					mpd->map.m_len = 0;
2080					mpd->map.m_flags = 0;
2081					/*
2082					 * FIXME: If dioread_nolock supports
2083					 * blocksize < pagesize, we need to make
2084					 * sure we add size mapped so far to
2085					 * io_end->size as the following call
2086					 * can submit the page for IO.
2087					 */
2088					err = mpage_process_page_bufs(mpd, head,
2089								      bh, lblk);
2090					pagevec_release(&pvec);
2091					if (err > 0)
2092						err = 0;
2093					return err;
2094				}
2095				if (buffer_delay(bh)) {
2096					clear_buffer_delay(bh);
2097					bh->b_blocknr = pblock++;
2098				}
2099				clear_buffer_unwritten(bh);
2100			} while (lblk++, (bh = bh->b_this_page) != head);
2101
2102			/*
2103			 * FIXME: This is going to break if dioread_nolock
2104			 * supports blocksize < pagesize as we will try to
2105			 * convert potentially unmapped parts of inode.
2106			 */
2107			mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
2108			/* Page fully mapped - let IO run! */
2109			err = mpage_submit_page(mpd, page);
2110			if (err < 0) {
2111				pagevec_release(&pvec);
2112				return err;
2113			}
2114			start++;
2115		}
2116		pagevec_release(&pvec);
2117	}
2118	/* Extent fully mapped and matches with page boundary. We are done. */
2119	mpd->map.m_len = 0;
2120	mpd->map.m_flags = 0;
2121	return 0;
2122}
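/*
 * Example of the block-to-page arithmetic above, assuming 4K pages and a
 * 1K blocksize (bpp_bits = 12 - 10 = 2): an extent with m_lblk = 10 and
 * m_len = 9 touches pages start = 10 >> 2 = 2 through end = 18 >> 2 = 4,
 * and the buffer walk begins at lblk = start << 2 = 8, the first block
 * of page 2.
 */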
2123
2124static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2125{
2126	struct inode *inode = mpd->inode;
2127	struct ext4_map_blocks *map = &mpd->map;
2128	int get_blocks_flags;
2129	int err;
2130
2131	trace_ext4_da_write_pages_extent(inode, map);
2132	/*
2133	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2134	 * to convert an uninitialized extent to be initialized (in the case
2135	 * where we have written into one or more preallocated blocks).  It is
2136	 * possible that we're going to need more metadata blocks than
2137	 * previously reserved. However we must not fail because we're in
2138	 * writeback and there is nothing we can do about it so it might result
2139	 * in data loss.  So use reserved blocks to allocate metadata if
2140	 * possible.
2141	 *
2142	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks
2143	 * in question are delalloc blocks.  This affects functions in many
2144	 * different parts of the allocation call path.  This flag exists
2145	 * primarily because we don't want to change *many* call functions, so
2146	 * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
2147	 * once the inode's allocation semaphore is taken.
2148	 */
2149	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2150			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
2151	if (ext4_should_dioread_nolock(inode))
2152		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2153	if (map->m_flags & (1 << BH_Delay))
2154		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2155
2156	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2157	if (err < 0)
2158		return err;
2159	if (map->m_flags & EXT4_MAP_UNINIT) {
2160		if (!mpd->io_submit.io_end->handle &&
2161		    ext4_handle_valid(handle)) {
2162			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2163			handle->h_rsv_handle = NULL;
2164		}
2165		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2166	}
2167
2168	BUG_ON(map->m_len == 0);
2169	if (map->m_flags & EXT4_MAP_NEW) {
2170		struct block_device *bdev = inode->i_sb->s_bdev;
2171		int i;
2172
2173		for (i = 0; i < map->m_len; i++)
2174			unmap_underlying_metadata(bdev, map->m_pblk + i);
2175	}
2176	return 0;
2177}
2178
2179/*
2180 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2181 *				 mpd->len and submit pages underlying it for IO
2182 *
2183 * @handle - handle for journal operations
2184 * @mpd - extent to map
2185 * @give_up_on_write - we set this to true iff there is a fatal error and there
2186 *                     is no hope of writing the data. The caller should discard
2187 *                     dirty pages to avoid infinite loops.
2188 *
2189 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2190 * delayed, blocks are allocated, if it is unwritten, we may need to convert
2191 * them to initialized or split the described range from larger unwritten
2192 * extent. Note that we need not map all the described range since allocation
2193 * can return fewer blocks, or the range may be covered by more unwritten extents. We
2194 * cannot map more because we are limited by reserved transaction credits. On
2195 * the other hand we always make sure that the last touched page is fully
2196 * mapped so that it can be written out (and thus forward progress is
2197 * guaranteed). After mapping we submit all mapped pages for IO.
2198 */
2199static int mpage_map_and_submit_extent(handle_t *handle,
2200				       struct mpage_da_data *mpd,
2201				       bool *give_up_on_write)
2202{
2203	struct inode *inode = mpd->inode;
2204	struct ext4_map_blocks *map = &mpd->map;
2205	int err;
2206	loff_t disksize;
2207
2208	mpd->io_submit.io_end->offset =
2209				((loff_t)map->m_lblk) << inode->i_blkbits;
2210	do {
2211		err = mpage_map_one_extent(handle, mpd);
2212		if (err < 0) {
2213			struct super_block *sb = inode->i_sb;
2214
2215			if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2216				goto invalidate_dirty_pages;
2217			/*
2218			 * Let the upper layers retry transient errors.
2219			 * In the case of ENOSPC, if ext4_count_free_clusters()
2220			 * is non-zero, a commit should free up blocks.
2221			 */
2222			if ((err == -ENOMEM) ||
2223			    (err == -ENOSPC && ext4_count_free_clusters(sb)))
2224				return err;
2225			ext4_msg(sb, KERN_CRIT,
2226				 "Delayed block allocation failed for "
2227				 "inode %lu at logical offset %llu with"
2228				 " max blocks %u with error %d",
2229				 inode->i_ino,
2230				 (unsigned long long)map->m_lblk,
2231				 (unsigned)map->m_len, -err);
2232			ext4_msg(sb, KERN_CRIT,
2233				 "This should not happen!! Data will "
2234				 "be lost\n");
2235			if (err == -ENOSPC)
2236				ext4_print_free_blocks(inode);
2237		invalidate_dirty_pages:
2238			*give_up_on_write = true;
2239			return err;
2240		}
2241		/*
2242		 * Update buffer state, submit mapped pages, and get us new
2243		 * extent to map
2244		 */
2245		err = mpage_map_and_submit_buffers(mpd);
2246		if (err < 0)
2247			return err;
2248	} while (map->m_len);
2249
2250	/*
2251	 * Update on-disk size after IO is submitted.  Races with
2252	 * truncate are avoided by checking i_size under i_data_sem.
2253	 */
2254	disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
2255	if (disksize > EXT4_I(inode)->i_disksize) {
2256		int err2;
2257		loff_t i_size;
2258
2259		down_write(&EXT4_I(inode)->i_data_sem);
2260		i_size = i_size_read(inode);
2261		if (disksize > i_size)
2262			disksize = i_size;
2263		if (disksize > EXT4_I(inode)->i_disksize)
2264			EXT4_I(inode)->i_disksize = disksize;
2265		err2 = ext4_mark_inode_dirty(handle, inode);
2266		up_write(&EXT4_I(inode)->i_data_sem);
2267		if (err2)
2268			ext4_error(inode->i_sb,
2269				   "Failed to mark inode %lu dirty",
2270				   inode->i_ino);
2271		if (!err)
2272			err = err2;
2273	}
2274	return err;
2275}
2276
2277/*
2278 * Calculate the total number of credits to reserve for one writepages
2279 * iteration. This is called from ext4_writepages(). We map an extent of
2280 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2281 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2282 * bpp - 1 blocks in bpp different extents.
2283 */
2284static int ext4_da_writepages_trans_blocks(struct inode *inode)
2285{
2286	int bpp = ext4_journal_blocks_per_page(inode);
2287
2288	return ext4_meta_trans_blocks(inode,
2289				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2290}
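/*
 * Worked example, assuming 4K pages and a 1K blocksize so that
 * ext4_journal_blocks_per_page() returns bpp = 4: credits are reserved
 * for mapping MAX_WRITEPAGES_EXTENT_LEN + 4 - 1 = 2051 blocks in up to 4
 * separate extents, enough to finish the last partial page as well.
 */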
2291
2292/*
2293 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2294 * 				 and underlying extent to map
2295 *
2296 * @mpd - where to look for pages
2297 *
2298 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2299 * IO immediately. When we find a page which isn't mapped we start accumulating
2300 * extent of buffers underlying these pages that needs mapping (formed by
2301 * either delayed or unwritten buffers). We also lock the pages containing
2302 * these buffers. The extent found is returned in @mpd structure (starting at
2303 * mpd->lblk with length mpd->len blocks).
2304 *
2305 * Note that this function can attach bios to one io_end structure which are
2307 * neither logically nor physically contiguous. Although it may seem an
2307 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2308 * case as we need to track IO to all buffers underlying a page in one io_end.
2309 */
2310static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2311{
2312	struct address_space *mapping = mpd->inode->i_mapping;
2313	struct pagevec pvec;
2314	unsigned int nr_pages;
2315	long left = mpd->wbc->nr_to_write;
2316	pgoff_t index = mpd->first_page;
2317	pgoff_t end = mpd->last_page;
2318	int tag;
2319	int i, err = 0;
2320	int blkbits = mpd->inode->i_blkbits;
2321	ext4_lblk_t lblk;
2322	struct buffer_head *head;
2323
2324	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2325		tag = PAGECACHE_TAG_TOWRITE;
2326	else
2327		tag = PAGECACHE_TAG_DIRTY;
2328
2329	pagevec_init(&pvec, 0);
2330	mpd->map.m_len = 0;
2331	mpd->next_page = index;
2332	while (index <= end) {
2333		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2334			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2335		if (nr_pages == 0)
2336			goto out;
2337
2338		for (i = 0; i < nr_pages; i++) {
2339			struct page *page = pvec.pages[i];
2340
2341			/*
2342			 * At this point, the page may be truncated or
2343			 * invalidated (changing page->mapping to NULL), or
2344			 * even swizzled back from swapper_space to tmpfs file
2345			 * mapping. However, page->index will not change
2346			 * because we have a reference on the page.
2347			 */
2348			if (page->index > end)
2349				goto out;
2350
2351			/*
2352			 * Accumulated enough dirty pages? This doesn't apply
2353			 * to WB_SYNC_ALL mode. For integrity sync we have to
2354			 * keep going because someone may be concurrently
2355			 * dirtying pages, and we might have synced a lot of
2356			 * newly appeared dirty pages, but have not synced all
2357			 * of the old dirty pages.
2358			 */
2359			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2360				goto out;
2361
2362			/* If we can't merge this page, we are done. */
2363			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2364				goto out;
2365
2366			lock_page(page);
2367			/*
2368			 * If the page is no longer dirty, or its mapping no
2369			 * longer corresponds to inode we are writing (which
2370			 * means it has been truncated or invalidated), or the
2371			 * page is already under writeback and we are not doing
2372			 * a data integrity writeback, skip the page.
2373			 */
2374			if (!PageDirty(page) ||
2375			    (PageWriteback(page) &&
2376			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2377			    unlikely(page->mapping != mapping)) {
2378				unlock_page(page);
2379				continue;
2380			}
2381
2382			wait_on_page_writeback(page);
2383			BUG_ON(PageWriteback(page));
2384
2385			if (mpd->map.m_len == 0)
2386				mpd->first_page = page->index;
2387			mpd->next_page = page->index + 1;
2388			/* Add all dirty buffers to mpd */
2389			lblk = ((ext4_lblk_t)page->index) <<
2390				(PAGE_CACHE_SHIFT - blkbits);
2391			head = page_buffers(page);
2392			err = mpage_process_page_bufs(mpd, head, head, lblk);
2393			if (err <= 0)
2394				goto out;
2395			err = 0;
2396			left--;
2397		}
2398		pagevec_release(&pvec);
2399		cond_resched();
2400	}
2401	return 0;
2402out:
2403	pagevec_release(&pvec);
2404	return err;
2405}
2406
2407static int __writepage(struct page *page, struct writeback_control *wbc,
2408		       void *data)
2409{
2410	struct address_space *mapping = data;
2411	int ret = ext4_writepage(page, wbc);
2412	mapping_set_error(mapping, ret);
2413	return ret;
2414}
2415
2416static int ext4_writepages(struct address_space *mapping,
2417			   struct writeback_control *wbc)
2418{
2419	pgoff_t	writeback_index = 0;
2420	long nr_to_write = wbc->nr_to_write;
2421	int range_whole = 0;
2422	int cycled = 1;
2423	handle_t *handle = NULL;
2424	struct mpage_da_data mpd;
2425	struct inode *inode = mapping->host;
2426	int needed_blocks, rsv_blocks = 0, ret = 0;
2427	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2428	bool done;
2429	struct blk_plug plug;
2430	bool give_up_on_write = false;
2431
2432	trace_ext4_writepages(inode, wbc);
2433
2434	/*
2435	 * No pages to write? This is mainly a kludge to avoid starting
2436	 * a transaction for special inodes like journal inode on last iput()
2437	 * because that could violate lock ordering on umount.
2438	 */
2439	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2440		goto out_writepages;
2441
2442	if (ext4_should_journal_data(inode)) {
2443		struct blk_plug plug;
2444
2445		blk_start_plug(&plug);
2446		ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2447		blk_finish_plug(&plug);
2448		goto out_writepages;
2449	}
2450
2451	/*
2452	 * If the filesystem has aborted, it is read-only, so return
2453	 * right away instead of dumping stack traces later on that
2454	 * will obscure the real source of the problem.  We test
2455	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2456	 * the latter could be true if the filesystem is mounted
2457	 * read-only, and in that case, ext4_writepages should
2458	 * *never* be called, so if that ever happens, we would want
2459	 * the stack trace.
2460	 */
2461	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2462		ret = -EROFS;
2463		goto out_writepages;
2464	}
2465
2466	if (ext4_should_dioread_nolock(inode)) {
2467		/*
2468		 * We may need to convert up to one extent per block in
2469		 * the page and we may dirty the inode.
2470		 */
2471		rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
2472	}
2473
2474	/*
2475	 * If we have inline data and arrive here, it means that
2476	 * we will soon create the block for the 1st page, so
2477	 * we'd better clear the inline data here.
2478	 */
2479	if (ext4_has_inline_data(inode)) {
2480		/* Just inode will be modified... */
2481		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2482		if (IS_ERR(handle)) {
2483			ret = PTR_ERR(handle);
2484			goto out_writepages;
2485		}
2486		BUG_ON(ext4_test_inode_state(inode,
2487				EXT4_STATE_MAY_INLINE_DATA));
2488		ext4_destroy_inline_data(handle, inode);
2489		ext4_journal_stop(handle);
2490	}
2491
2492	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2493		range_whole = 1;
2494
2495	if (wbc->range_cyclic) {
2496		writeback_index = mapping->writeback_index;
2497		if (writeback_index)
2498			cycled = 0;
2499		mpd.first_page = writeback_index;
2500		mpd.last_page = -1;
2501	} else {
2502		mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
2503		mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
2504	}
2505
2506	mpd.inode = inode;
2507	mpd.wbc = wbc;
2508	ext4_io_submit_init(&mpd.io_submit, wbc);
2509retry:
2510	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2511		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2512	done = false;
2513	blk_start_plug(&plug);
2514	while (!done && mpd.first_page <= mpd.last_page) {
2515		/* For each extent of pages we use new io_end */
2516		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2517		if (!mpd.io_submit.io_end) {
2518			ret = -ENOMEM;
2519			break;
2520		}
2521
2522		/*
2523		 * We have two constraints: We find one extent to map and we
2524		 * must always write out the whole page (makes a difference when
2525		 * blocksize < pagesize) so that we don't block on IO when we
2526		 * try to write out the rest of the page. Journalled mode is
2527		 * not supported by delalloc.
2528		 */
2529		BUG_ON(ext4_should_journal_data(inode));
2530		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2531
2532		/* start a new transaction */
2533		handle = ext4_journal_start_with_reserve(inode,
2534				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2535		if (IS_ERR(handle)) {
2536			ret = PTR_ERR(handle);
2537			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2538			       "%ld pages, ino %lu; err %d", __func__,
2539				wbc->nr_to_write, inode->i_ino, ret);
2540			/* Release allocated io_end */
2541			ext4_put_io_end(mpd.io_submit.io_end);
2542			break;
2543		}
2544
2545		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2546		ret = mpage_prepare_extent_to_map(&mpd);
2547		if (!ret) {
2548			if (mpd.map.m_len)
2549				ret = mpage_map_and_submit_extent(handle, &mpd,
2550					&give_up_on_write);
2551			else {
2552				/*
2553				 * We scanned the whole range (or exhausted
2554				 * nr_to_write), submitted what was mapped and
2555				 * didn't find anything needing mapping. We are
2556				 * done.
2557				 */
2558				done = true;
2559			}
2560		}
2561		ext4_journal_stop(handle);
2562		/* Submit prepared bio */
2563		ext4_io_submit(&mpd.io_submit);
2564		/* Unlock pages we didn't use */
2565		mpage_release_unused_pages(&mpd, give_up_on_write);
2566		/* Drop our io_end reference we got from init */
2567		ext4_put_io_end(mpd.io_submit.io_end);
2568
2569		if (ret == -ENOSPC && sbi->s_journal) {
2570			/*
2571			 * Commit the transaction which would
2572			 * free blocks released in the transaction
2573			 * and try again
2574			 */
2575			jbd2_journal_force_commit_nested(sbi->s_journal);
2576			ret = 0;
2577			continue;
2578		}
2579		/* Fatal error - ENOMEM, EIO... */
2580		if (ret)
2581			break;
2582	}
2583	blk_finish_plug(&plug);
2584	if (!ret && !cycled && wbc->nr_to_write > 0) {
2585		cycled = 1;
2586		mpd.last_page = writeback_index - 1;
2587		mpd.first_page = 0;
2588		goto retry;
2589	}
2590
2591	/* Update index */
2592	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2593		/*
2594		 * Set the writeback_index so that range_cyclic
2595		 * mode will write it back later
2596		 */
2597		mapping->writeback_index = mpd.first_page;
2598
2599out_writepages:
2600	trace_ext4_writepages_result(inode, wbc, ret,
2601				     nr_to_write - wbc->nr_to_write);
2602	return ret;
2603}
2604
2605static int ext4_nonda_switch(struct super_block *sb)
2606{
2607	s64 free_clusters, dirty_clusters;
2608	struct ext4_sb_info *sbi = EXT4_SB(sb);
2609
2610	/*
2611	 * Switch to non-delalloc mode if we are running low
2612	 * on free blocks. The free block accounting via percpu
2613	 * counters can get slightly wrong with percpu_counter_batch getting
2614	 * accumulated on each CPU without updating global counters.
2615	 * Delalloc needs accurate free block accounting, so switch
2616	 * to non-delalloc when we are near the error range.
2617	 */
2618	free_clusters =
2619		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2620	dirty_clusters =
2621		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2622	/*
2623	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2624	 */
2625	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2626		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2627
2628	if (2 * free_clusters < 3 * dirty_clusters ||
2629	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2630		/*
2631		 * The free block count is less than 150% of the dirty
2632		 * block count, or free blocks are below the watermark.
2633		 */
2634		return 1;
2635	}
2636	return 0;
2637}
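/*
 * Worked example of the thresholds above: with free_clusters = 100,
 * writeback is nudged once dirty_clusters exceeds 50 (free < 2 * dirty),
 * and the switch to non-delalloc happens once dirty_clusters reaches 67
 * (2 * 100 < 3 * 67) or free drops under dirty plus the watermark.
 */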
2638
2639static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2640			       loff_t pos, unsigned len, unsigned flags,
2641			       struct page **pagep, void **fsdata)
2642{
2643	int ret, retries = 0;
2644	struct page *page;
2645	pgoff_t index;
2646	struct inode *inode = mapping->host;
2647	handle_t *handle;
2648
2649	index = pos >> PAGE_CACHE_SHIFT;
2650
2651	if (ext4_nonda_switch(inode->i_sb)) {
2652		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2653		return ext4_write_begin(file, mapping, pos,
2654					len, flags, pagep, fsdata);
2655	}
2656	*fsdata = (void *)0;
2657	trace_ext4_da_write_begin(inode, pos, len, flags);
2658
2659	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2660		ret = ext4_da_write_inline_data_begin(mapping, inode,
2661						      pos, len, flags,
2662						      pagep, fsdata);
2663		if (ret < 0)
2664			return ret;
2665		if (ret == 1)
2666			return 0;
2667	}
2668
2669	/*
2670	 * grab_cache_page_write_begin() can take a long time if the
2671	 * system is thrashing due to memory pressure, or if the page
2672	 * is being written back.  So grab it first before we start
2673	 * the transaction handle.  This also allows us to allocate
2674	 * the page (if needed) without using GFP_NOFS.
2675	 */
2676retry_grab:
2677	page = grab_cache_page_write_begin(mapping, index, flags);
2678	if (!page)
2679		return -ENOMEM;
2680	unlock_page(page);
2681
2682	/*
2683	 * With delayed allocation, we don't log the i_disksize update
2684	 * if there is delayed block allocation. But we still need
2685	 * to journal the i_disksize update if a write to the end of
2686	 * the file hits an already mapped buffer.
2687	 */
2688retry_journal:
2689	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
2690	if (IS_ERR(handle)) {
2691		page_cache_release(page);
2692		return PTR_ERR(handle);
2693	}
2694
2695	lock_page(page);
2696	if (page->mapping != mapping) {
2697		/* The page got truncated from under us */
2698		unlock_page(page);
2699		page_cache_release(page);
2700		ext4_journal_stop(handle);
2701		goto retry_grab;
2702	}
2703	/* In case writeback began while the page was unlocked */
2704	wait_for_stable_page(page);
2705
2706	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2707	if (ret < 0) {
2708		unlock_page(page);
2709		ext4_journal_stop(handle);
2710		/*
2711		 * block_write_begin may have instantiated a few blocks
2712		 * outside i_size.  Trim these off again. Don't need
2713		 * i_size_read because we hold i_mutex.
2714		 */
2715		if (pos + len > inode->i_size)
2716			ext4_truncate_failed_write(inode);
2717
2718		if (ret == -ENOSPC &&
2719		    ext4_should_retry_alloc(inode->i_sb, &retries))
2720			goto retry_journal;
2721
2722		page_cache_release(page);
2723		return ret;
2724	}
2725
2726	*pagep = page;
2727	return ret;
2728}
2729
2730/*
2731 * Check if we should update i_disksize
2732 * when write to the end of file but not require block allocation
2733 */
2734static int ext4_da_should_update_i_disksize(struct page *page,
2735					    unsigned long offset)
2736{
2737	struct buffer_head *bh;
2738	struct inode *inode = page->mapping->host;
2739	unsigned int idx;
2740	int i;
2741
2742	bh = page_buffers(page);
2743	idx = offset >> inode->i_blkbits;
2744
2745	for (i = 0; i < idx; i++)
2746		bh = bh->b_this_page;
2747
2748	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2749		return 0;
2750	return 1;
2751}
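/*
 * Example of the index arithmetic above, assuming a 1K blocksize
 * (i_blkbits = 10): for offset = 3000 we get idx = 3000 >> 10 = 2, so the
 * loop follows two b_this_page links and inspects the third buffer of the
 * page, the one covering bytes 2048-3071 where the write ended.
 */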
2752
2753static int ext4_da_write_end(struct file *file,
2754			     struct address_space *mapping,
2755			     loff_t pos, unsigned len, unsigned copied,
2756			     struct page *page, void *fsdata)
2757{
2758	struct inode *inode = mapping->host;
2759	int ret = 0, ret2;
2760	handle_t *handle = ext4_journal_current_handle();
2761	loff_t new_i_size;
2762	unsigned long start, end;
2763	int write_mode = (int)(unsigned long)fsdata;
2764
2765	if (write_mode == FALL_BACK_TO_NONDELALLOC)
2766		return ext4_write_end(file, mapping, pos,
2767				      len, copied, page, fsdata);
2768
2769	trace_ext4_da_write_end(inode, pos, len, copied);
2770	start = pos & (PAGE_CACHE_SIZE - 1);
2771	end = start + copied - 1;
2772
2773	/*
2774	 * generic_write_end() will run mark_inode_dirty() if i_size
2775	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
2776	 * into that.
2777	 */
2778	new_i_size = pos + copied;
2779	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2780		if (ext4_has_inline_data(inode) ||
2781		    ext4_da_should_update_i_disksize(page, end)) {
2782			down_write(&EXT4_I(inode)->i_data_sem);
2783			if (new_i_size > EXT4_I(inode)->i_disksize)
2784				EXT4_I(inode)->i_disksize = new_i_size;
2785			up_write(&EXT4_I(inode)->i_data_sem);
2786			/* We need to mark inode dirty even if
2787			 * new_i_size is less than inode->i_size
2788			 * but greater than i_disksize (hint: delalloc).
2789			 */
2790			ext4_mark_inode_dirty(handle, inode);
2791		}
2792	}
2793
2794	if (write_mode != CONVERT_INLINE_DATA &&
2795	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
2796	    ext4_has_inline_data(inode))
2797		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
2798						     page);
2799	else
2800		ret2 = generic_write_end(file, mapping, pos, len, copied,
2801							page, fsdata);
2802
2803	copied = ret2;
2804	if (ret2 < 0)
2805		ret = ret2;
2806	ret2 = ext4_journal_stop(handle);
2807	if (!ret)
2808		ret = ret2;
2809
2810	return ret ? ret : copied;
2811}
2812
2813static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
2814				   unsigned int length)
2815{
2816	/*
2817	 * Drop reserved blocks
2818	 */
2819	BUG_ON(!PageLocked(page));
2820	if (!page_has_buffers(page))
2821		goto out;
2822
2823	ext4_da_page_release_reservation(page, offset, length);
2824
2825out:
2826	ext4_invalidatepage(page, offset, length);
2827
2828	return;
2829}
2830
2831/*
2832 * Force all delayed allocation blocks to be allocated for a given inode.
2833 */
2834int ext4_alloc_da_blocks(struct inode *inode)
2835{
2836	trace_ext4_alloc_da_blocks(inode);
2837
2838	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2839	    !EXT4_I(inode)->i_reserved_meta_blocks)
2840		return 0;
2841
2842	/*
2843	 * We do something simple for now.  The filemap_flush() will
2844	 * also start triggering a write of the data blocks, which is
2845	 * not strictly speaking necessary (and for users of
2846	 * laptop_mode, not even desirable).  However, to do otherwise
2847	 * would require replicating code paths in:
2848	 *
2849	 * ext4_writepages() ->
2850	 *    write_cache_pages() ---> (via passed in callback function)
2851	 *        __mpage_da_writepage() -->
2852	 *           mpage_add_bh_to_extent()
2853	 *           mpage_da_map_blocks()
2854	 *
2855	 * The problem is that write_cache_pages(), located in
2856	 * mm/page-writeback.c, marks pages clean in preparation for
2857	 * doing I/O, which is not desirable if we're not planning on
2858	 * doing I/O at all.
2859	 *
2860	 * We could call write_cache_pages(), and then redirty all of
2861	 * the pages by calling redirty_page_for_writepage() but that
2862	 * would be ugly in the extreme.  So instead we would need to
2863	 * replicate parts of the code in the above functions,
2864	 * simplifying them because we wouldn't actually intend to
2865	 * write out the pages, but rather only collect contiguous
2866	 * logical block extents, call the multi-block allocator, and
2867	 * then update the buffer heads with the block allocations.
2868	 *
2869	 * For now, though, we'll cheat by calling filemap_flush(),
2870	 * which will map the blocks, and start the I/O, but not
2871	 * actually wait for the I/O to complete.
2872	 */
2873	return filemap_flush(inode->i_mapping);
2874}
2875
2876/*
2877 * bmap() is special.  It gets used by applications such as lilo and by
2878 * the swapper to find the on-disk block of a specific piece of data.
2879 *
2880 * Naturally, this is dangerous if the block concerned is still in the
2881 * journal.  If somebody makes a swapfile on an ext4 data-journaling
2882 * filesystem and enables swap, then they may get a nasty shock when the
2883 * data getting swapped to that swapfile suddenly gets overwritten by
2884 * the original zeros written out previously to the journal and
2885 * awaiting writeback in the kernel's buffer cache.
2886 *
2887 * So, if we see any bmap calls here on a modified, data-journaled file,
2888 * take extra steps to flush any blocks which might be in the cache.
2889 */
2890static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2891{
2892	struct inode *inode = mapping->host;
2893	journal_t *journal;
2894	int err;
2895
2896	/*
2897	 * We can get here for an inline file via the FIBMAP ioctl
2898	 */
2899	if (ext4_has_inline_data(inode))
2900		return 0;
2901
2902	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2903			test_opt(inode->i_sb, DELALLOC)) {
2904		/*
2905		 * With delalloc we want to sync the file
2906		 * so that we can make sure we allocate
2907		 * blocks for the file.
2908		 */
2909		filemap_write_and_wait(mapping);
2910	}
2911
2912	if (EXT4_JOURNAL(inode) &&
2913	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2914		/*
2915		 * This is a REALLY heavyweight approach, but the use of
2916		 * bmap on dirty files is expected to be extremely rare:
2917		 * only if we run lilo or swapon on a freshly made file
2918		 * do we expect this to happen.
2919		 *
2920		 * (bmap requires CAP_SYS_RAWIO so this does not
2921		 * represent an unprivileged user DOS attack --- we'd be
2922		 * in trouble if mortal users could trigger this path at
2923		 * will.)
2924		 *
2925		 * NB. EXT4_STATE_JDATA is not set on files other than
2926		 * regular files.  If somebody wants to bmap a directory
2927		 * or symlink and gets confused because the buffer
2928		 * hasn't yet been flushed to disk, they deserve
2929		 * everything they get.
2930		 */
2931
2932		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2933		journal = EXT4_JOURNAL(inode);
2934		jbd2_journal_lock_updates(journal);
2935		err = jbd2_journal_flush(journal);
2936		jbd2_journal_unlock_updates(journal);
2937
2938		if (err)
2939			return 0;
2940	}
2941
2942	return generic_block_bmap(mapping, block, ext4_get_block);
2943}
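/*
 * Hedged userspace sketch of how this path is commonly reached via the
 * FIBMAP ioctl (the file name and the missing error handling are
 * assumptions made for the example; the caller needs CAP_SYS_RAWIO):
 *
 *	#include <linux/fs.h>
 *
 *	int fd = open("/boot/vmlinuz", O_RDONLY);
 *	int blk = 0;
 *
 *	ioctl(fd, FIBMAP, &blk);	<- blk: logical block in, physical out
 *
 * On a data-journaled file that was written recently, the journal flush
 * above runs first so that the returned block really is on disk.
 */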
2944
2945static int ext4_readpage(struct file *file, struct page *page)
2946{
2947	int ret = -EAGAIN;
2948	struct inode *inode = page->mapping->host;
2949
2950	trace_ext4_readpage(page);
2951
2952	if (ext4_has_inline_data(inode))
2953		ret = ext4_readpage_inline(inode, page);
2954
2955	if (ret == -EAGAIN)
2956		return mpage_readpage(page, ext4_get_block);
2957
2958	return ret;
2959}
2960
2961static int
2962ext4_readpages(struct file *file, struct address_space *mapping,
2963		struct list_head *pages, unsigned nr_pages)
2964{
2965	struct inode *inode = mapping->host;
2966
2967	/* If the file has inline data, no need to do readpages. */
2968	if (ext4_has_inline_data(inode))
2969		return 0;
2970
2971	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2972}
2973
2974static void ext4_invalidatepage(struct page *page, unsigned int offset,
2975				unsigned int length)
2976{
2977	trace_ext4_invalidatepage(page, offset, length);
2978
2979	/* No journalling happens on data buffers when this function is used */
2980	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
2981
2982	block_invalidatepage(page, offset, length);
2983}
2984
2985static int __ext4_journalled_invalidatepage(struct page *page,
2986					    unsigned int offset,
2987					    unsigned int length)
2988{
2989	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2990
2991	trace_ext4_journalled_invalidatepage(page, offset, length);
2992
2993	/*
2994	 * If it's a full truncate we just forget about the pending dirtying
2995	 */
2996	if (offset == 0 && length == PAGE_CACHE_SIZE)
2997		ClearPageChecked(page);
2998
2999	return jbd2_journal_invalidatepage(journal, page, offset, length);
3000}
3001
3002/* Wrapper for aops... */
3003static void ext4_journalled_invalidatepage(struct page *page,
3004					   unsigned int offset,
3005					   unsigned int length)
3006{
3007	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3008}
3009
3010static int ext4_releasepage(struct page *page, gfp_t wait)
3011{
3012	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3013
3014	trace_ext4_releasepage(page);
3015
3016	/* Page has dirty journalled data -> cannot release */
3017	if (PageChecked(page))
3018		return 0;
3019	if (journal)
3020		return jbd2_journal_try_to_free_buffers(journal, page, wait);
3021	else
3022		return try_to_free_buffers(page);
3023}
3024
3025/*
3026 * ext4_get_block used when preparing for a DIO write or buffer write.
3027 * We allocate an uninitialized extent if blocks haven't been allocated.
3028 * The extent will be converted to initialized after the IO is complete.
3029 */
3030int ext4_get_block_write(struct inode *inode, sector_t iblock,
3031		   struct buffer_head *bh_result, int create)
3032{
3033	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3034		   inode->i_ino, create);
3035	return _ext4_get_block(inode, iblock, bh_result,
3036			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
3037}
3038
3039static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
3040		   struct buffer_head *bh_result, int create)
3041{
3042	ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
3043		   inode->i_ino, create);
3044	return _ext4_get_block(inode, iblock, bh_result,
3045			       EXT4_GET_BLOCKS_NO_LOCK);
3046}
3047
3048static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3049			    ssize_t size, void *private)
3050{
3051	ext4_io_end_t *io_end = iocb->private;
3052
3053	/* if not async direct IO just return */
3054	if (!io_end)
3055		return;
3056
3057	ext_debug("ext4_end_io_dio(): io_end 0x%p "
3058		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3059		  iocb->private, io_end->inode->i_ino, iocb, offset,
3060		  size);
3061
3062	iocb->private = NULL;
3063	io_end->offset = offset;
3064	io_end->size = size;
3065	ext4_put_io_end(io_end);
3066}
3067
3068/*
3069 * For ext4 extent files, ext4 will do direct-io writes to holes,
3070 * preallocated extents, and writes that extend the file; there is no
3071 * need to fall back to buffered IO.
3072 *
3073 * For holes, we fallocate those blocks and mark them as uninitialized.
3074 * If those blocks were preallocated, we make sure they are split, but
3075 * still keep the range to write as uninitialized.
3076 *
3077 * The unwritten extents will be converted to written when DIO is completed.
3078 * For async direct IO, since the IO may still be pending when we return, we
3079 * set up an end_io callback function, which will do the conversion
3080 * when the async direct IO is completed.
3081 *
3082 * If the O_DIRECT write will extend the file then add this inode to the
3083 * orphan list.  So recovery will truncate it back to the original size
3084 * if the machine crashes during the write.
3085 *
3086 */
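/*
 * Hedged userspace sketch of an O_DIRECT write that takes the path below
 * (file name, sizes, and missing error handling are assumptions for the
 * example; O_DIRECT needs a block-aligned buffer, offset, and length):
 *
 *	void *buf;
 *	int fd = open("f", O_WRONLY | O_DIRECT);
 *
 *	posix_memalign(&buf, 4096, 4096);
 *	memset(buf, 'x', 4096);
 *	pwrite(fd, buf, 4096, 0);
 *
 * If the write lands in a hole or a fallocated region, the extent stays
 * uninitialized on disk until the end_io callback converts it after the
 * IO completes.
 */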
3087static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3088			      const struct iovec *iov, loff_t offset,
3089			      unsigned long nr_segs)
3090{
3091	struct file *file = iocb->ki_filp;
3092	struct inode *inode = file->f_mapping->host;
3093	ssize_t ret;
3094	size_t count = iov_length(iov, nr_segs);
3095	int overwrite = 0;
3096	get_block_t *get_block_func = NULL;
3097	int dio_flags = 0;
3098	loff_t final_size = offset + count;
3099	ext4_io_end_t *io_end = NULL;
3100
3101	/* Use the old path for reads and writes beyond i_size. */
3102	if (rw != WRITE || final_size > inode->i_size)
3103		return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3104
3105	BUG_ON(iocb->private == NULL);
3106
3107	/*
3108	 * Make all waiters for direct IO properly wait also for extent
3109	 * conversion. This also disallows race between truncate() and
3110	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3111	 */
3112	if (rw == WRITE)
3113		atomic_inc(&inode->i_dio_count);
3114
3115	/* If we do an overwrite DIO, i_mutex locking can be released */
3116	overwrite = *((int *)iocb->private);
3117
3118	if (overwrite) {
3119		down_read(&EXT4_I(inode)->i_data_sem);
3120		mutex_unlock(&inode->i_mutex);
3121	}
3122
3123	/*
3124	 * We could direct write to holes and fallocate.
3125	 *
3126	 * Allocated blocks to fill the hole are marked as
3127	 * uninitialized to prevent a parallel buffered read from exposing
3128	 * stale data before the DIO completes the data IO.
3129	 *
3130	 * As to previously fallocated extents, ext4 get_block will
3131	 * simply mark the buffer mapped but still keep the
3132	 * extents uninitialized.
3133	 *
3134	 * In the non-AIO case, we will convert those unwritten extents
3135	 * to written after returning from blockdev_direct_IO.
3136	 *
3137	 * For async DIO, the conversion needs to be deferred when the
3138	 * IO is completed. The ext4 end_io callback function will be
3139	 * called to take care of the conversion work.  Here for async
3140	 * case, we allocate an io_end structure to hook to the iocb.
3141	 */
3142	iocb->private = NULL;
3143	ext4_inode_aio_set(inode, NULL);
3144	if (!is_sync_kiocb(iocb)) {
3145		io_end = ext4_init_io_end(inode, GFP_NOFS);
3146		if (!io_end) {
3147			ret = -ENOMEM;
3148			goto retake_lock;
3149		}
3150		/*
3151		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3152		 */
3153		iocb->private = ext4_get_io_end(io_end);
3154		/*
3155		 * We save the io structure for the current async direct
3156		 * IO, so that later ext4_map_blocks() can flag in the
3157		 * io structure whether there are unwritten extents that
3158		 * need to be converted when the IO is completed.
3159		 */
3160		ext4_inode_aio_set(inode, io_end);
3161	}
3162
3163	if (overwrite) {
3164		get_block_func = ext4_get_block_write_nolock;
3165	} else {
3166		get_block_func = ext4_get_block_write;
3167		dio_flags = DIO_LOCKING;
3168	}
3169	ret = __blockdev_direct_IO(rw, iocb, inode,
3170				   inode->i_sb->s_bdev, iov,
3171				   offset, nr_segs,
3172				   get_block_func,
3173				   ext4_end_io_dio,
3174				   NULL,
3175				   dio_flags);
3176
3177	/*
3178	 * Put our reference to io_end. This can free the io_end structure e.g.
3179	 * in sync IO case or in case of error. It can even perform extent
3180	 * conversion if all bios we submitted finished before we got here.
3181	 * Note that in that case iocb->private can be already set to NULL
3182	 * here.
3183	 */
3184	if (io_end) {
3185		ext4_inode_aio_set(inode, NULL);
3186		ext4_put_io_end(io_end);
3187		/*
3188		 * When no IO was submitted ext4_end_io_dio() was not
3189		 * called so we have to put iocb's reference.
3190		 */
3191		if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
3192			WARN_ON(iocb->private != io_end);
3193			WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
3194			ext4_put_io_end(io_end);
3195			iocb->private = NULL;
3196		}
3197	}
3198	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3199						EXT4_STATE_DIO_UNWRITTEN)) {
3200		int err;
3201		/*
3202		 * In the non-AIO case, since the IO has already
3203		 * completed, we can do the conversion right here.
3204		 */
3205		err = ext4_convert_unwritten_extents(NULL, inode,
3206						     offset, ret);
3207		if (err < 0)
3208			ret = err;
3209		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3210	}
3211
3212retake_lock:
3213	if (rw == WRITE)
3214		inode_dio_done(inode);
3215	/* take i_mutex locking again if we did an overwrite DIO */
3216	if (overwrite) {
3217		up_read(&EXT4_I(inode)->i_data_sem);
3218		mutex_lock(&inode->i_mutex);
3219	}
3220
3221	return ret;
3222}
3223
3224static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3225			      const struct iovec *iov, loff_t offset,
3226			      unsigned long nr_segs)
3227{
3228	struct file *file = iocb->ki_filp;
3229	struct inode *inode = file->f_mapping->host;
3230	ssize_t ret;
3231
3232	/*
3233	 * If we are doing data journalling we don't support O_DIRECT
3234	 */
3235	if (ext4_should_journal_data(inode))
3236		return 0;
3237
3238	/* Let buffer I/O handle the inline data case. */
3239	if (ext4_has_inline_data(inode))
3240		return 0;
3241
3242	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3243	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3244		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3245	else
3246		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3247	trace_ext4_direct_IO_exit(inode, offset,
3248				iov_length(iov, nr_segs), rw, ret);
3249	return ret;
3250}
3251
3252/*
3253 * Pages can be marked dirty completely asynchronously from ext4's journalling
3254 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3255 * much here because ->set_page_dirty is called under VFS locks.  The page is
3256 * not necessarily locked.
3257 *
3258 * We cannot just dirty the page and leave attached buffers clean, because the
3259 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3260 * or jbddirty because all the journalling code will explode.
3261 *
3262 * So what we do is to mark the page "pending dirty" and next time writepage
3263 * is called, propagate that into the buffers appropriately.
3264 */
3265static int ext4_journalled_set_page_dirty(struct page *page)
3266{
3267	SetPageChecked(page);
3268	return __set_page_dirty_nobuffers(page);
3269}
3270
3271static const struct address_space_operations ext4_aops = {
3272	.readpage		= ext4_readpage,
3273	.readpages		= ext4_readpages,
3274	.writepage		= ext4_writepage,
3275	.writepages		= ext4_writepages,
3276	.write_begin		= ext4_write_begin,
3277	.write_end		= ext4_write_end,
3278	.bmap			= ext4_bmap,
3279	.invalidatepage		= ext4_invalidatepage,
3280	.releasepage		= ext4_releasepage,
3281	.direct_IO		= ext4_direct_IO,
3282	.migratepage		= buffer_migrate_page,
3283	.is_partially_uptodate  = block_is_partially_uptodate,
3284	.error_remove_page	= generic_error_remove_page,
3285};
3286
3287static const struct address_space_operations ext4_journalled_aops = {
3288	.readpage		= ext4_readpage,
3289	.readpages		= ext4_readpages,
3290	.writepage		= ext4_writepage,
3291	.writepages		= ext4_writepages,
3292	.write_begin		= ext4_write_begin,
3293	.write_end		= ext4_journalled_write_end,
3294	.set_page_dirty		= ext4_journalled_set_page_dirty,
3295	.bmap			= ext4_bmap,
3296	.invalidatepage		= ext4_journalled_invalidatepage,
3297	.releasepage		= ext4_releasepage,
3298	.direct_IO		= ext4_direct_IO,
3299	.is_partially_uptodate  = block_is_partially_uptodate,
3300	.error_remove_page	= generic_error_remove_page,
3301};
3302
3303static const struct address_space_operations ext4_da_aops = {
3304	.readpage		= ext4_readpage,
3305	.readpages		= ext4_readpages,
3306	.writepage		= ext4_writepage,
3307	.writepages		= ext4_writepages,
3308	.write_begin		= ext4_da_write_begin,
3309	.write_end		= ext4_da_write_end,
3310	.bmap			= ext4_bmap,
3311	.invalidatepage		= ext4_da_invalidatepage,
3312	.releasepage		= ext4_releasepage,
3313	.direct_IO		= ext4_direct_IO,
3314	.migratepage		= buffer_migrate_page,
3315	.is_partially_uptodate  = block_is_partially_uptodate,
3316	.error_remove_page	= generic_error_remove_page,
3317};
3318
3319void ext4_set_aops(struct inode *inode)
3320{
3321	switch (ext4_inode_journal_mode(inode)) {
3322	case EXT4_INODE_ORDERED_DATA_MODE:
3323		ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3324		break;
3325	case EXT4_INODE_WRITEBACK_DATA_MODE:
3326		ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3327		break;
3328	case EXT4_INODE_JOURNAL_DATA_MODE:
3329		inode->i_mapping->a_ops = &ext4_journalled_aops;
3330		return;
3331	default:
3332		BUG();
3333	}
3334	if (test_opt(inode->i_sb, DELALLOC))
3335		inode->i_mapping->a_ops = &ext4_da_aops;
3336	else
3337		inode->i_mapping->a_ops = &ext4_aops;
3338}
3339
3340/*
3341 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3342 * starting from file offset 'from'.  The range to be zeroed must
3343 * be contained within one block.  If the specified range exceeds
3344 * the end of the block, it will be shortened to the end of the block
3345 * that corresponds to 'from'.
3346 */
3347static int ext4_block_zero_page_range(handle_t *handle,
3348		struct address_space *mapping, loff_t from, loff_t length)
3349{
3350	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3351	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3352	unsigned blocksize, max, pos;
3353	ext4_lblk_t iblock;
3354	struct inode *inode = mapping->host;
3355	struct buffer_head *bh;
3356	struct page *page;
3357	int err = 0;
3358
3359	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3360				   mapping_gfp_mask(mapping) & ~__GFP_FS);
3361	if (!page)
3362		return -ENOMEM;
3363
3364	blocksize = inode->i_sb->s_blocksize;
3365	max = blocksize - (offset & (blocksize - 1));
3366
3367	/*
3368	 * correct length if it does not fall between
3369	 * 'from' and the end of the block
3370	 */
3371	if (length > max || length < 0)
3372		length = max;
3373
3374	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3375
3376	if (!page_has_buffers(page))
3377		create_empty_buffers(page, blocksize, 0);
3378
3379	/* Find the buffer that contains "offset" */
3380	bh = page_buffers(page);
3381	pos = blocksize;
3382	while (offset >= pos) {
3383		bh = bh->b_this_page;
3384		iblock++;
3385		pos += blocksize;
3386	}
3387	if (buffer_freed(bh)) {
3388		BUFFER_TRACE(bh, "freed: skip");
3389		goto unlock;
3390	}
3391	if (!buffer_mapped(bh)) {
3392		BUFFER_TRACE(bh, "unmapped");
3393		ext4_get_block(inode, iblock, bh, 0);
3394		/* unmapped? It's a hole - nothing to do */
3395		if (!buffer_mapped(bh)) {
3396			BUFFER_TRACE(bh, "still unmapped");
3397			goto unlock;
3398		}
3399	}
3400
3401	/* Ok, it's mapped. Make sure it's up-to-date */
3402	if (PageUptodate(page))
3403		set_buffer_uptodate(bh);
3404
3405	if (!buffer_uptodate(bh)) {
3406		err = -EIO;
3407		ll_rw_block(READ, 1, &bh);
3408		wait_on_buffer(bh);
3409		/* Uhhuh. Read error. Complain and punt. */
3410		if (!buffer_uptodate(bh))
3411			goto unlock;
3412	}
3413	if (ext4_should_journal_data(inode)) {
3414		BUFFER_TRACE(bh, "get write access");
3415		err = ext4_journal_get_write_access(handle, bh);
3416		if (err)
3417			goto unlock;
3418	}
3419	zero_user(page, offset, length);
3420	BUFFER_TRACE(bh, "zeroed end of block");
3421
3422	if (ext4_should_journal_data(inode)) {
3423		err = ext4_handle_dirty_metadata(handle, inode, bh);
3424	} else {
3425		err = 0;
3426		mark_buffer_dirty(bh);
3427		if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
3428			err = ext4_jbd2_file_inode(handle, inode);
3429	}
3430
3431unlock:
3432	unlock_page(page);
3433	page_cache_release(page);
3434	return err;
3435}
3436
3437/*
3438 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3439 * up to the end of the block which corresponds to `from'.
3440 * This is required during truncate. We need to physically zero the tail end
3441 * of that block so it doesn't yield old data if the file is later grown.
3442 */
3443int ext4_block_truncate_page(handle_t *handle,
3444		struct address_space *mapping, loff_t from)
3445{
3446	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3447	unsigned length;
3448	unsigned blocksize;
3449	struct inode *inode = mapping->host;
3450
3451	blocksize = inode->i_sb->s_blocksize;
3452	length = blocksize - (offset & (blocksize - 1));
3453
3454	return ext4_block_zero_page_range(handle, mapping, from, length);
3455}
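/*
 * Worked example for the helper above, assuming a 4K blocksize and 4K
 * pages: for from = 10000, offset = 10000 & 4095 = 1808 and length =
 * 4096 - 1808 = 2288, so bytes 10000..12287 (the tail of the block that
 * holds the new EOF) are zeroed.
 */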
3456
3457int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3458			     loff_t lstart, loff_t length)
3459{
3460	struct super_block *sb = inode->i_sb;
3461	struct address_space *mapping = inode->i_mapping;
3462	unsigned partial_start, partial_end;
3463	ext4_fsblk_t start, end;
3464	loff_t byte_end = (lstart + length - 1);
3465	int err = 0;
3466
3467	partial_start = lstart & (sb->s_blocksize - 1);
3468	partial_end = byte_end & (sb->s_blocksize - 1);
3469
3470	start = lstart >> sb->s_blocksize_bits;
3471	end = byte_end >> sb->s_blocksize_bits;
3472
3473	/* Handle partial zero within the single block */
3474	if (start == end &&
3475	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
3476		err = ext4_block_zero_page_range(handle, mapping,
3477						 lstart, length);
3478		return err;
3479	}
3480	/* Handle partial zero out on the start of the range */
3481	if (partial_start) {
3482		err = ext4_block_zero_page_range(handle, mapping,
3483						 lstart, sb->s_blocksize);
3484		if (err)
3485			return err;
3486	}
3487	/* Handle partial zero out on the end of the range */
3488	if (partial_end != sb->s_blocksize - 1)
3489		err = ext4_block_zero_page_range(handle, mapping,
3490						 byte_end - partial_end,
3491						 partial_end + 1);
3492	return err;
3493}
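/*
 * Illustrative worked example (a sketch, assuming a 4 KiB block size;
 * the numbers are hypothetical):
 *
 *	lstart = 1000, length = 12000, byte_end = 12999
 *	partial_start = 1000 & 4095 = 1000
 *	partial_end   = 12999 & 4095 = 711
 *	start = 0, end = 3
 *
 * start != end, so the tail of block 0 (bytes 1000..4095) and the head
 * of block 3 (bytes 12288..12999) are zeroed here; the fully covered
 * blocks 1 and 2 are left for the caller to release.
 */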
3494
3495int ext4_can_truncate(struct inode *inode)
3496{
3497	if (S_ISREG(inode->i_mode))
3498		return 1;
3499	if (S_ISDIR(inode->i_mode))
3500		return 1;
3501	if (S_ISLNK(inode->i_mode))
3502		return !ext4_inode_is_fast_symlink(inode);
3503	return 0;
3504}
3505
3506/*
3507 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3508 * associated with the given offset and length
3509 *
3510 * @inode:  File inode
3511 * @offset: The offset where the hole will begin
3512 * @len:    The length of the hole
3513 *
3514 * Returns: 0 on success or negative on failure
3515 */
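/*
 * Illustrative call path (a sketch, not part of this file): userspace
 * typically reaches this via fallocate(2), e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * which the VFS routes through the filesystem's ->fallocate() op
 * (ext4_fallocate()) to this function.
 */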
3516
3517int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3518{
3519	struct super_block *sb = inode->i_sb;
3520	ext4_lblk_t first_block, stop_block;
3521	struct address_space *mapping = inode->i_mapping;
3522	loff_t first_block_offset, last_block_offset;
3523	handle_t *handle;
3524	unsigned int credits;
3525	int ret = 0;
3526
3527	if (!S_ISREG(inode->i_mode))
3528		return -EOPNOTSUPP;
3529
3530	trace_ext4_punch_hole(inode, offset, length, 0);
3531
3532	/*
3533	 * Write out all dirty pages to avoid race conditions,
3534	 * then release them.
3535	 */
3536	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3537		ret = filemap_write_and_wait_range(mapping, offset,
3538						   offset + length - 1);
3539		if (ret)
3540			return ret;
3541	}
3542
3543	mutex_lock(&inode->i_mutex);
3544
3545	/* No need to punch hole beyond i_size */
3546	if (offset >= inode->i_size)
3547		goto out_mutex;
3548
3549	/*
3550	 * If the hole extends beyond i_size, set the hole
3551	 * to end after the page that contains i_size
3552	 */
3553	if (offset + length > inode->i_size) {
3554		length = inode->i_size +
3555		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
3556		   offset;
3557	}
3558
3559	if (offset & (sb->s_blocksize - 1) ||
3560	    (offset + length) & (sb->s_blocksize - 1)) {
3561		/*
3562		 * Attach jinode to inode for jbd2 if we do any zeroing of
3563		 * partial block
3564		 */
3565		ret = ext4_inode_attach_jinode(inode);
3566		if (ret < 0)
3567			goto out_mutex;
3568
3569	}
3570
3571	first_block_offset = round_up(offset, sb->s_blocksize);
3572	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
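	/*
	 * Illustrative example (a sketch, assuming a 4 KiB block size):
	 * offset = 1000, length = 12000 gives
	 * first_block_offset = round_up(1000, 4096) = 4096 and
	 * last_block_offset = round_down(13000, 4096) - 1 = 12287, i.e.
	 * only the fully contained blocks are dropped from the page cache.
	 */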
3573
3574	/* Now release the pages and zero the block-aligned part of the pages */
3575	if (last_block_offset > first_block_offset)
3576		truncate_pagecache_range(inode, first_block_offset,
3577					 last_block_offset);
3578
3579	/* Wait for all existing dio workers; newcomers will block on i_mutex */
3580	ext4_inode_block_unlocked_dio(inode);
3581	inode_dio_wait(inode);
3582
3583	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3584		credits = ext4_writepage_trans_blocks(inode);
3585	else
3586		credits = ext4_blocks_for_truncate(inode);
3587	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3588	if (IS_ERR(handle)) {
3589		ret = PTR_ERR(handle);
3590		ext4_std_error(sb, ret);
3591		goto out_dio;
3592	}
3593
3594	ret = ext4_zero_partial_blocks(handle, inode, offset,
3595				       length);
3596	if (ret)
3597		goto out_stop;
3598
3599	first_block = (offset + sb->s_blocksize - 1) >>
3600		EXT4_BLOCK_SIZE_BITS(sb);
3601	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3602
3603	/* If there are no blocks to remove, return now */
3604	if (first_block >= stop_block)
3605		goto out_stop;
3606
3607	down_write(&EXT4_I(inode)->i_data_sem);
3608	ext4_discard_preallocations(inode);
3609
3610	ret = ext4_es_remove_extent(inode, first_block,
3611				    stop_block - first_block);
3612	if (ret) {
3613		up_write(&EXT4_I(inode)->i_data_sem);
3614		goto out_stop;
3615	}
3616
3617	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3618		ret = ext4_ext_remove_space(inode, first_block,
3619					    stop_block - 1);
3620	else
3621		ret = ext4_free_hole_blocks(handle, inode, first_block,
3622					    stop_block);
3623
3624	up_write(&EXT4_I(inode)->i_data_sem);
3625	if (IS_SYNC(inode))
3626		ext4_handle_sync(handle);
3627
3628	/* Now release the pages again to reduce the race window */
3629	if (last_block_offset > first_block_offset)
3630		truncate_pagecache_range(inode, first_block_offset,
3631					 last_block_offset);
3632
3633	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3634	ext4_mark_inode_dirty(handle, inode);
3635out_stop:
3636	ext4_journal_stop(handle);
3637out_dio:
3638	ext4_inode_resume_unlocked_dio(inode);
3639out_mutex:
3640	mutex_unlock(&inode->i_mutex);
3641	return ret;
3642}
3643
3644int ext4_inode_attach_jinode(struct inode *inode)
3645{
3646	struct ext4_inode_info *ei = EXT4_I(inode);
3647	struct jbd2_inode *jinode;
3648
3649	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
3650		return 0;
3651
3652	jinode = jbd2_alloc_inode(GFP_KERNEL);
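	/*
	 * Descriptive note: we allocate optimistically before taking
	 * i_lock and re-check under the lock; if another task installed
	 * a jinode first, the unused allocation is freed below.  This
	 * avoids a sleeping GFP_KERNEL allocation under a spinlock.
	 */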
3653	spin_lock(&inode->i_lock);
3654	if (!ei->jinode) {
3655		if (!jinode) {
3656			spin_unlock(&inode->i_lock);
3657			return -ENOMEM;
3658		}
3659		ei->jinode = jinode;
3660		jbd2_journal_init_jbd_inode(ei->jinode, inode);
3661		jinode = NULL;
3662	}
3663	spin_unlock(&inode->i_lock);
3664	if (unlikely(jinode != NULL))
3665		jbd2_free_inode(jinode);
3666	return 0;
3667}
3668
3669/*
3670 * ext4_truncate()
3671 *
3672 * We block out ext4_get_block() block instantiations across the entire
3673 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3674 * simultaneously on behalf of the same inode.
3675 *
3676 * As we work through the truncate and commit bits of it to the journal there
3677 * is one core, guiding principle: the file's tree must always be consistent on
3678 * disk.  We must be able to restart the truncate after a crash.
3679 *
3680 * The file's tree may be transiently inconsistent in memory (although it
3681 * probably isn't), but whenever we close off and commit a journal transaction,
3682 * the contents of (the filesystem + the journal) must be consistent and
3683 * restartable.  It's pretty simple, really: bottom up, right to left (although
3684 * left-to-right works OK too).
3685 *
3686 * Note that at recovery time, journal replay occurs *before* the restart of
3687 * truncate against the orphan inode list.
3688 *
3689 * The committed inode has the new, desired i_size (which is the same as
3690 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3691 * that this inode's truncate did not complete and it will again call
3692 * ext4_truncate() to have another go.  So there will be instantiated blocks
3693 * to the right of the truncation point in a crashed ext4 filesystem.  But
3694 * that's fine - as long as they are linked from the inode, the post-crash
3695 * ext4_truncate() run will find them and release them.
3696 */
3697void ext4_truncate(struct inode *inode)
3698{
3699	struct ext4_inode_info *ei = EXT4_I(inode);
3700	unsigned int credits;
3701	handle_t *handle;
3702	struct address_space *mapping = inode->i_mapping;
3703
3704	/*
3705	 * There is a possibility that we're either freeing the inode
3706	 * or it's a completely new inode. In those cases we might not
3707	 * have i_mutex locked because it's not necessary.
3708	 */
3709	if (!(inode->i_state & (I_NEW|I_FREEING)))
3710		WARN_ON(!mutex_is_locked(&inode->i_mutex));
3711	trace_ext4_truncate_enter(inode);
3712
3713	if (!ext4_can_truncate(inode))
3714		return;
3715
3716	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3717
3718	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3719		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3720
3721	if (ext4_has_inline_data(inode)) {
3722		int has_inline = 1;
3723
3724		ext4_inline_data_truncate(inode, &has_inline);
3725		if (has_inline)
3726			return;
3727	}
3728
3729	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
3730	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
3731		if (ext4_inode_attach_jinode(inode) < 0)
3732			return;
3733	}
3734
3735	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3736		credits = ext4_writepage_trans_blocks(inode);
3737	else
3738		credits = ext4_blocks_for_truncate(inode);
3739
3740	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3741	if (IS_ERR(handle)) {
3742		ext4_std_error(inode->i_sb, PTR_ERR(handle));
3743		return;
3744	}
3745
3746	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
3747		ext4_block_truncate_page(handle, mapping, inode->i_size);
3748
3749	/*
3750	 * We add the inode to the orphan list, so that if this
3751	 * truncate spans multiple transactions, and we crash, we will
3752	 * resume the truncate when the filesystem recovers.  It also
3753	 * marks the inode dirty, to catch the new size.
3754	 *
3755	 * Implication: the file must always be in a sane, consistent
3756	 * truncatable state while each transaction commits.
3757	 */
3758	if (ext4_orphan_add(handle, inode))
3759		goto out_stop;
3760
3761	down_write(&EXT4_I(inode)->i_data_sem);
3762
3763	ext4_discard_preallocations(inode);
3764
3765	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3766		ext4_ext_truncate(handle, inode);
3767	else
3768		ext4_ind_truncate(handle, inode);
3769
3770	up_write(&ei->i_data_sem);
3771
3772	if (IS_SYNC(inode))
3773		ext4_handle_sync(handle);
3774
3775out_stop:
3776	/*
3777	 * If this was a simple ftruncate() and the file will remain alive,
3778	 * then we need to clear up the orphan record which we created above.
3779	 * However, if this was a real unlink then we were called by
3780	 * ext4_delete_inode(), and we allow that function to clean up the
3781	 * orphan info for us.
3782	 */
3783	if (inode->i_nlink)
3784		ext4_orphan_del(handle, inode);
3785
3786	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3787	ext4_mark_inode_dirty(handle, inode);
3788	ext4_journal_stop(handle);
3789
3790	trace_ext4_truncate_exit(inode);
3791}
3792
3793/*
3794 * ext4_get_inode_loc returns with an extra refcount against the inode's
3795 * underlying buffer_head on success. If 'in_mem' is true, we have all
3796 * data in memory that is needed to recreate the on-disk version of this
3797 * inode.
3798 */
3799static int __ext4_get_inode_loc(struct inode *inode,
3800				struct ext4_iloc *iloc, int in_mem)
3801{
3802	struct ext4_group_desc	*gdp;
3803	struct buffer_head	*bh;
3804	struct super_block	*sb = inode->i_sb;
3805	ext4_fsblk_t		block;
3806	int			inodes_per_block, inode_offset;
3807
3808	iloc->bh = NULL;
3809	if (!ext4_valid_inum(sb, inode->i_ino))
3810		return -EIO;
3811
3812	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3813	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3814	if (!gdp)
3815		return -EIO;
3816
3817	/*
3818	 * Figure out the offset within the block group inode table
3819	 */
3820	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3821	inode_offset = ((inode->i_ino - 1) %
3822			EXT4_INODES_PER_GROUP(sb));
3823	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3824	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
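	/*
	 * Illustrative example (a sketch with hypothetical geometry:
	 * 256-byte inodes, 4 KiB blocks, 8192 inodes per group):
	 * inodes_per_block = 16, so for ino = 100 we get block_group = 0,
	 * inode_offset = 99, block = itable + 99 / 16 = itable + 6 and
	 * iloc->offset = (99 % 16) * 256 = 768.
	 */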
3825
3826	bh = sb_getblk(sb, block);
3827	if (unlikely(!bh))
3828		return -ENOMEM;
3829	if (!buffer_uptodate(bh)) {
3830		lock_buffer(bh);
3831
3832		/*
3833		 * If the buffer has the write error flag, we have failed
3834		 * to write out another inode in the same block.  In this
3835		 * case, we don't have to read the block because we may
3836		 * read the old inode data successfully.
3837		 */
3838		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3839			set_buffer_uptodate(bh);
3840
3841		if (buffer_uptodate(bh)) {
3842			/* someone brought it uptodate while we waited */
3843			unlock_buffer(bh);
3844			goto has_buffer;
3845		}
3846
3847		/*
3848	 * If we have all of the inode's information in memory and this
3849		 * is the only valid inode in the block, we need not read the
3850		 * block.
3851		 */
3852		if (in_mem) {
3853			struct buffer_head *bitmap_bh;
3854			int i, start;
3855
3856			start = inode_offset & ~(inodes_per_block - 1);
3857
3858			/* Is the inode bitmap in cache? */
3859			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3860			if (unlikely(!bitmap_bh))
3861				goto make_io;
3862
3863			/*
3864			 * If the inode bitmap isn't in cache then the
3865			 * optimisation may end up performing two reads instead
3866			 * of one, so skip it.
3867			 */
3868			if (!buffer_uptodate(bitmap_bh)) {
3869				brelse(bitmap_bh);
3870				goto make_io;
3871			}
3872			for (i = start; i < start + inodes_per_block; i++) {
3873				if (i == inode_offset)
3874					continue;
3875				if (ext4_test_bit(i, bitmap_bh->b_data))
3876					break;
3877			}
3878			brelse(bitmap_bh);
3879			if (i == start + inodes_per_block) {
3880				/* all other inodes are free, so skip I/O */
3881				memset(bh->b_data, 0, bh->b_size);
3882				set_buffer_uptodate(bh);
3883				unlock_buffer(bh);
3884				goto has_buffer;
3885			}
3886		}
3887
3888make_io:
3889		/*
3890		 * If we need to do any I/O, try to pre-readahead extra
3891		 * blocks from the inode table.
3892		 */
3893		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3894			ext4_fsblk_t b, end, table;
3895			unsigned num;
3896			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
3897
3898			table = ext4_inode_table(sb, gdp);
3899			/* s_inode_readahead_blks is always a power of 2 */
3900			b = block & ~((ext4_fsblk_t) ra_blks - 1);
3901			if (table > b)
3902				b = table;
3903			end = b + ra_blks;
3904			num = EXT4_INODES_PER_GROUP(sb);
3905			if (ext4_has_group_desc_csum(sb))
3906				num -= ext4_itable_unused_count(sb, gdp);
3907			table += num / inodes_per_block;
3908			if (end > table)
3909				end = table;
3910			while (b <= end)
3911				sb_breadahead(sb, b++);
3912		}
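		/*
		 * Illustrative example (a sketch with hypothetical block
		 * numbers): block = 1000 and ra_blks = 32 give
		 * b = 1000 & ~31 = 992 and end = 992 + 32 = 1024, so the
		 * loop reads ahead itable blocks 992..end, clamped by the
		 * checks above to the in-use part of the inode table.
		 */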
3913
3914		/*
3915		 * There are other valid inodes in the buffer, this inode
3916		 * has in-inode xattrs, or we don't have this inode in memory.
3917		 * Read the block from disk.
3918		 */
3919		trace_ext4_load_inode(inode);
3920		get_bh(bh);
3921		bh->b_end_io = end_buffer_read_sync;
3922		submit_bh(READ | REQ_META | REQ_PRIO, bh);
3923		wait_on_buffer(bh);
3924		if (!buffer_uptodate(bh)) {
3925			EXT4_ERROR_INODE_BLOCK(inode, block,
3926					       "unable to read itable block");
3927			brelse(bh);
3928			return -EIO;
3929		}
3930	}
3931has_buffer:
3932	iloc->bh = bh;
3933	return 0;
3934}
3935
3936int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3937{
3938	/* We have all inode data except xattrs in memory here. */
3939	return __ext4_get_inode_loc(inode, iloc,
3940		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3941}
3942
3943void ext4_set_inode_flags(struct inode *inode)
3944{
3945	unsigned int flags = EXT4_I(inode)->i_flags;
3946	unsigned int new_fl = 0;
3947
3948	if (flags & EXT4_SYNC_FL)
3949		new_fl |= S_SYNC;
3950	if (flags & EXT4_APPEND_FL)
3951		new_fl |= S_APPEND;
3952	if (flags & EXT4_IMMUTABLE_FL)
3953		new_fl |= S_IMMUTABLE;
3954	if (flags & EXT4_NOATIME_FL)
3955		new_fl |= S_NOATIME;
3956	if (flags & EXT4_DIRSYNC_FL)
3957		new_fl |= S_DIRSYNC;
3958	inode_set_flags(inode, new_fl,
3959			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3960}
3961
3962/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3963void ext4_get_inode_flags(struct ext4_inode_info *ei)
3964{
3965	unsigned int vfs_fl;
3966	unsigned long old_fl, new_fl;
3967
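	/*
	 * Descriptive note: this is a lock-free read-modify-write.  We
	 * snapshot the flags, compute the new value, and retry until the
	 * cmpxchg() confirms no concurrent update slipped in between the
	 * read of old_fl and the store.
	 */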
3968	do {
3969		vfs_fl = ei->vfs_inode.i_flags;
3970		old_fl = ei->i_flags;
3971		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3972				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3973				EXT4_DIRSYNC_FL);
3974		if (vfs_fl & S_SYNC)
3975			new_fl |= EXT4_SYNC_FL;
3976		if (vfs_fl & S_APPEND)
3977			new_fl |= EXT4_APPEND_FL;
3978		if (vfs_fl & S_IMMUTABLE)
3979			new_fl |= EXT4_IMMUTABLE_FL;
3980		if (vfs_fl & S_NOATIME)
3981			new_fl |= EXT4_NOATIME_FL;
3982		if (vfs_fl & S_DIRSYNC)
3983			new_fl |= EXT4_DIRSYNC_FL;
3984	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3985}
3986
3987static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3988				  struct ext4_inode_info *ei)
3989{
3990	blkcnt_t i_blocks;
3991	struct inode *inode = &(ei->vfs_inode);
3992	struct super_block *sb = inode->i_sb;
3993
3994	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3995				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3996		/* we are using combined 48 bit field */
3997		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3998					le32_to_cpu(raw_inode->i_blocks_lo);
3999		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4000			/* i_blocks is in units of filesystem blocks */
4001			return i_blocks  << (inode->i_blkbits - 9);
4002		} else {
4003			return i_blocks;
4004		}
4005	} else {
4006		return le32_to_cpu(raw_inode->i_blocks_lo);
4007	}
4008}
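/*
 * Illustrative worked example (a sketch): with EXT4_INODE_HUGE_FILE set,
 * a 4 KiB block size (i_blkbits = 12) and a raw 48-bit count of 10, the
 * on-disk value is in filesystem blocks, so this returns
 * 10 << (12 - 9) = 80 sectors of 512 bytes (40960 bytes).
 */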
4009
4010static inline void ext4_iget_extra_inode(struct inode *inode,
4011					 struct ext4_inode *raw_inode,
4012					 struct ext4_inode_info *ei)
4013{
4014	__le32 *magic = (void *)raw_inode +
4015			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4016	if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4017		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4018		ext4_find_inline_data_nolock(inode);
4019	} else
4020		EXT4_I(inode)->i_inline_off = 0;
4021}
4022
4023struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4024{
4025	struct ext4_iloc iloc;
4026	struct ext4_inode *raw_inode;
4027	struct ext4_inode_info *ei;
4028	struct inode *inode;
4029	journal_t *journal = EXT4_SB(sb)->s_journal;
4030	long ret;
4031	int block;
4032	uid_t i_uid;
4033	gid_t i_gid;
4034
4035	inode = iget_locked(sb, ino);
4036	if (!inode)
4037		return ERR_PTR(-ENOMEM);
4038	if (!(inode->i_state & I_NEW))
4039		return inode;
4040
4041	ei = EXT4_I(inode);
4042	iloc.bh = NULL;
4043
4044	ret = __ext4_get_inode_loc(inode, &iloc, 0);
4045	if (ret < 0)
4046		goto bad_inode;
4047	raw_inode = ext4_raw_inode(&iloc);
4048
4049	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4050		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4051		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4052		    EXT4_INODE_SIZE(inode->i_sb)) {
4053			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
4054				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
4055				EXT4_INODE_SIZE(inode->i_sb));
4056			ret = -EIO;
4057			goto bad_inode;
4058		}
4059	} else
4060		ei->i_extra_isize = 0;
4061
4062	/* Precompute checksum seed for inode metadata */
4063	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4064			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
4065		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4066		__u32 csum;
4067		__le32 inum = cpu_to_le32(inode->i_ino);
4068		__le32 gen = raw_inode->i_generation;
4069		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4070				   sizeof(inum));
4071		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4072					      sizeof(gen));
4073	}
4074
4075	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4076		EXT4_ERROR_INODE(inode, "checksum invalid");
4077		ret = -EIO;
4078		goto bad_inode;
4079	}
4080
4081	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4082	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4083	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4084	if (!(test_opt(inode->i_sb, NO_UID32))) {
4085		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4086		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4087	}
4088	i_uid_write(inode, i_uid);
4089	i_gid_write(inode, i_gid);
4090	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4091
4092	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4093	ei->i_inline_off = 0;
4094	ei->i_dir_start_lookup = 0;
4095	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4096	/* We now have enough fields to check if the inode was active or not.
4097	 * This is needed because nfsd might try to access dead inodes;
4098	 * the test is the same one that e2fsck uses.
4099	 * NeilBrown 1999oct15
4100	 */
4101	if (inode->i_nlink == 0) {
4102		if ((inode->i_mode == 0 ||
4103		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4104		    ino != EXT4_BOOT_LOADER_INO) {
4105			/* this inode is deleted */
4106			ret = -ESTALE;
4107			goto bad_inode;
4108		}
4109		/* The only unlinked inodes we let through here have
4110		 * valid i_mode and are being read by the orphan
4111		 * recovery code: that's fine, we're about to complete
4112		 * the process of deleting those.
4113		 * OR it is the EXT4_BOOT_LOADER_INO which is
4114		 * not initialized on a new filesystem. */
4115	}
4116	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4117	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4118	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4119	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4120		ei->i_file_acl |=
4121			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4122	inode->i_size = ext4_isize(raw_inode);
4123	ei->i_disksize = inode->i_size;
4124#ifdef CONFIG_QUOTA
4125	ei->i_reserved_quota = 0;
4126#endif
4127	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4128	ei->i_block_group = iloc.block_group;
4129	ei->i_last_alloc_group = ~0;
4130	/*
4131	 * NOTE! The in-memory inode i_data array is in little-endian order
4132	 * even on big-endian machines: we do NOT byteswap the block numbers!
4133	 */
4134	for (block = 0; block < EXT4_N_BLOCKS; block++)
4135		ei->i_data[block] = raw_inode->i_block[block];
4136	INIT_LIST_HEAD(&ei->i_orphan);
4137
4138	/*
4139	 * Set transaction id's of transactions that have to be committed
4140	 * to finish f[data]sync. We set them to currently running transaction
4141	 * as we cannot be sure that the inode or some of its metadata isn't
4142	 * part of the transaction - the inode could have been reclaimed and
4143	 * now it is reread from disk.
4144	 */
4145	if (journal) {
4146		transaction_t *transaction;
4147		tid_t tid;
4148
4149		read_lock(&journal->j_state_lock);
4150		if (journal->j_running_transaction)
4151			transaction = journal->j_running_transaction;
4152		else
4153			transaction = journal->j_committing_transaction;
4154		if (transaction)
4155			tid = transaction->t_tid;
4156		else
4157			tid = journal->j_commit_sequence;
4158		read_unlock(&journal->j_state_lock);
4159		ei->i_sync_tid = tid;
4160		ei->i_datasync_tid = tid;
4161	}
4162
4163	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4164		if (ei->i_extra_isize == 0) {
4165			/* The extra space is currently unused. Use it. */
4166			ei->i_extra_isize = sizeof(struct ext4_inode) -
4167					    EXT4_GOOD_OLD_INODE_SIZE;
4168		} else {
4169			ext4_iget_extra_inode(inode, raw_inode, ei);
4170		}
4171	}
4172
4173	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4174	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4175	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4176	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4177
4178	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4179		inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4180		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4181			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4182				inode->i_version |=
4183		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4184		}
4185	}
4186
4187	ret = 0;
4188	if (ei->i_file_acl &&
4189	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4190		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4191				 ei->i_file_acl);
4192		ret = -EIO;
4193		goto bad_inode;
4194	} else if (!ext4_has_inline_data(inode)) {
4195		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4196			if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4197			    (S_ISLNK(inode->i_mode) &&
4198			     !ext4_inode_is_fast_symlink(inode))))
4199				/* Validate extents which are part of the inode */
4200				ret = ext4_ext_check_inode(inode);
4201		} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4202			   (S_ISLNK(inode->i_mode) &&
4203			    !ext4_inode_is_fast_symlink(inode))) {
4204			/* Validate block references which are part of inode */
4205			ret = ext4_ind_check_inode(inode);
4206		}
4207	}
4208	if (ret)
4209		goto bad_inode;
4210
4211	if (S_ISREG(inode->i_mode)) {
4212		inode->i_op = &ext4_file_inode_operations;
4213		inode->i_fop = &ext4_file_operations;
4214		ext4_set_aops(inode);
4215	} else if (S_ISDIR(inode->i_mode)) {
4216		inode->i_op = &ext4_dir_inode_operations;
4217		inode->i_fop = &ext4_dir_operations;
4218	} else if (S_ISLNK(inode->i_mode)) {
4219		if (ext4_inode_is_fast_symlink(inode)) {
4220			inode->i_op = &ext4_fast_symlink_inode_operations;
4221			nd_terminate_link(ei->i_data, inode->i_size,
4222				sizeof(ei->i_data) - 1);
4223		} else {
4224			inode->i_op = &ext4_symlink_inode_operations;
4225			ext4_set_aops(inode);
4226		}
4227	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4228	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4229		inode->i_op = &ext4_special_inode_operations;
4230		if (raw_inode->i_block[0])
4231			init_special_inode(inode, inode->i_mode,
4232			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4233		else
4234			init_special_inode(inode, inode->i_mode,
4235			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4236	} else if (ino == EXT4_BOOT_LOADER_INO) {
4237		make_bad_inode(inode);
4238	} else {
4239		ret = -EIO;
4240		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4241		goto bad_inode;
4242	}
4243	brelse(iloc.bh);
4244	ext4_set_inode_flags(inode);
4245	unlock_new_inode(inode);
4246	return inode;
4247
4248bad_inode:
4249	brelse(iloc.bh);
4250	iget_failed(inode);
4251	return ERR_PTR(ret);
4252}
4253
4254static int ext4_inode_blocks_set(handle_t *handle,
4255				struct ext4_inode *raw_inode,
4256				struct ext4_inode_info *ei)
4257{
4258	struct inode *inode = &(ei->vfs_inode);
4259	u64 i_blocks = inode->i_blocks;
4260	struct super_block *sb = inode->i_sb;
4261
4262	if (i_blocks <= ~0U) {
4263		/*
4264		 * i_blocks can be represented in a 32 bit variable
4265		 * as multiple of 512 bytes
4266		 */
4267		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4268		raw_inode->i_blocks_high = 0;
4269		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4270		return 0;
4271	}
4272	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4273		return -EFBIG;
4274
4275	if (i_blocks <= 0xffffffffffffULL) {
4276		/*
4277		 * i_blocks can be represented in a 48 bit variable
4278		 * as multiple of 512 bytes
4279		 */
4280		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4281		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4282		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4283	} else {
4284		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4285		/* i_block is stored in file system block size */
4286		i_blocks = i_blocks >> (inode->i_blkbits - 9);
4287		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
4288		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4289	}
4290	return 0;
4291}
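/*
 * Illustrative summary of the three encodings above (a sketch):
 *
 *	i_blocks = 1		-> i_blocks_lo only (32-bit, 512-byte units)
 *	i_blocks = 2^32		-> i_blocks_lo = 0, i_blocks_high = 1
 *				   (48-bit split, still 512-byte units)
 *	i_blocks > 2^48 - 1	-> HUGE_FILE flag set and the count stored
 *				   in filesystem-block units instead
 */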
4292
4293/*
4294 * Post the struct inode info into an on-disk inode location in the
4295 * buffer-cache.  This gobbles the caller's reference to the
4296 * buffer_head in the inode location struct.
4297 *
4298 * The caller must have write access to iloc->bh.
4299 */
4300static int ext4_do_update_inode(handle_t *handle,
4301				struct inode *inode,
4302				struct ext4_iloc *iloc)
4303{
4304	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4305	struct ext4_inode_info *ei = EXT4_I(inode);
4306	struct buffer_head *bh = iloc->bh;
4307	int err = 0, rc, block;
4308	int need_datasync = 0;
4309	uid_t i_uid;
4310	gid_t i_gid;
4311
4312	/* For fields not tracked in the in-memory inode,
4313	 * initialise them to zero for new inodes. */
4314	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4315		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4316
4317	ext4_get_inode_flags(ei);
4318	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4319	i_uid = i_uid_read(inode);
4320	i_gid = i_gid_read(inode);
4321	if (!(test_opt(inode->i_sb, NO_UID32))) {
4322		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4323		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4324/*
4325 * Fix up interoperability with old kernels. Otherwise, old inodes get
4326 * re-used with the upper 16 bits of the uid/gid intact
4327 */
4328		if (!ei->i_dtime) {
4329			raw_inode->i_uid_high =
4330				cpu_to_le16(high_16_bits(i_uid));
4331			raw_inode->i_gid_high =
4332				cpu_to_le16(high_16_bits(i_gid));
4333		} else {
4334			raw_inode->i_uid_high = 0;
4335			raw_inode->i_gid_high = 0;
4336		}
4337	} else {
4338		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4339		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4340		raw_inode->i_uid_high = 0;
4341		raw_inode->i_gid_high = 0;
4342	}
4343	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4344
4345	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4346	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4347	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4348	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4349
4350	if (ext4_inode_blocks_set(handle, raw_inode, ei))
4351		goto out_brelse;
4352	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4353	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4354	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4355		raw_inode->i_file_acl_high =
4356			cpu_to_le16(ei->i_file_acl >> 32);
4357	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4358	if (ei->i_disksize != ext4_isize(raw_inode)) {
4359		ext4_isize_set(raw_inode, ei->i_disksize);
4360		need_datasync = 1;
4361	}
4362	if (ei->i_disksize > 0x7fffffffULL) {
4363		struct super_block *sb = inode->i_sb;
4364		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4365				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4366				EXT4_SB(sb)->s_es->s_rev_level ==
4367				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4368			/* If this is the first large file
4369			 * created, add a flag to the superblock.
4370			 */
4371			err = ext4_journal_get_write_access(handle,
4372					EXT4_SB(sb)->s_sbh);
4373			if (err)
4374				goto out_brelse;
4375			ext4_update_dynamic_rev(sb);
4376			EXT4_SET_RO_COMPAT_FEATURE(sb,
4377					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4378			ext4_handle_sync(handle);
4379			err = ext4_handle_dirty_super(handle, sb);
4380		}
4381	}
4382	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4383	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4384		if (old_valid_dev(inode->i_rdev)) {
4385			raw_inode->i_block[0] =
4386				cpu_to_le32(old_encode_dev(inode->i_rdev));
4387			raw_inode->i_block[1] = 0;
4388		} else {
4389			raw_inode->i_block[0] = 0;
4390			raw_inode->i_block[1] =
4391				cpu_to_le32(new_encode_dev(inode->i_rdev));
4392			raw_inode->i_block[2] = 0;
4393		}
4394	} else if (!ext4_has_inline_data(inode)) {
4395		for (block = 0; block < EXT4_N_BLOCKS; block++)
4396			raw_inode->i_block[block] = ei->i_data[block];
4397	}
4398
4399	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4400		raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4401		if (ei->i_extra_isize) {
4402			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4403				raw_inode->i_version_hi =
4404					cpu_to_le32(inode->i_version >> 32);
4405			raw_inode->i_extra_isize =
4406				cpu_to_le16(ei->i_extra_isize);
4407		}
4408	}
4409
4410	ext4_inode_csum_set(inode, raw_inode, ei);
4411
4412	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4413	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4414	if (!err)
4415		err = rc;
4416	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4417
4418	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4419out_brelse:
4420	brelse(bh);
4421	ext4_std_error(inode->i_sb, err);
4422	return err;
4423}
4424
4425/*
4426 * ext4_write_inode()
4427 *
4428 * We are called from a few places:
4429 *
4430 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
4431 *   Here, there will be no transaction running. We wait for any running
4432 *   transaction to commit.
4433 *
4434 * - Within flush work (sys_sync(), kupdate and such).
4435 *   We wait on commit, if told to.
4436 *
4437 * - Within iput_final() -> write_inode_now()
4438 *   We wait on commit, if told to.
4439 *
4440 * In all cases it is actually safe for us to return without doing anything,
4441 * because the inode has been copied into a raw inode buffer in
4442 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
4443 * writeback.
4444 *
4445 * Note that we are absolutely dependent upon all inode dirtiers doing the
4446 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4447 * which we are interested.
4448 *
4449 * It would be a bug for them to not do this.  The code:
4450 *
4451 *	mark_inode_dirty(inode)
4452 *	stuff();
4453 *	inode->i_size = expr;
4454 *
4455 * is in error because write_inode() could occur while `stuff()' is running,
4456 * and the new i_size will be lost.  Plus the inode will no longer be on the
4457 * superblock's dirty inode list.
4458 */
4459int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4460{
4461	int err;
4462
4463	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
4464		return 0;
4465
4466	if (EXT4_SB(inode->i_sb)->s_journal) {
4467		if (ext4_journal_current_handle()) {
4468			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4469			dump_stack();
4470			return -EIO;
4471		}
4472
4473		/*
4474		 * No need to force transaction in WB_SYNC_NONE mode. Also
4475		 * ext4_sync_fs() will force the commit after everything is
4476		 * written.
4477		 */
4478		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
4479			return 0;
4480
4481		err = ext4_force_commit(inode->i_sb);
4482	} else {
4483		struct ext4_iloc iloc;
4484
4485		err = __ext4_get_inode_loc(inode, &iloc, 0);
4486		if (err)
4487			return err;
4488		/*
4489		 * sync(2) will flush the whole buffer cache. No need to do
4490		 * it here separately for each inode.
4491		 */
4492		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
4493			sync_dirty_buffer(iloc.bh);
4494		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4495			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4496					 "IO error syncing inode");
4497			err = -EIO;
4498		}
4499		brelse(iloc.bh);
4500	}
4501	return err;
4502}
4503
4504/*
4505 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
4506 * buffers that are attached to a page straddling i_size and are undergoing
4507 * commit. In that case we have to wait for commit to finish and try again.
4508 */
4509static void ext4_wait_for_tail_page_commit(struct inode *inode)
4510{
4511	struct page *page;
4512	unsigned offset;
4513	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
4514	tid_t commit_tid = 0;
4515	int ret;
4516
4517	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
4518	/*
4519	 * All buffers in the last page remain valid? Then there's nothing to
4520	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
4521	 * blocksize case.
4522	 */
4523	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
4524		return;
4525	while (1) {
4526		page = find_lock_page(inode->i_mapping,
4527				      inode->i_size >> PAGE_CACHE_SHIFT);
4528		if (!page)
4529			return;
4530		ret = __ext4_journalled_invalidatepage(page, offset,
4531						PAGE_CACHE_SIZE - offset);
4532		unlock_page(page);
4533		page_cache_release(page);
4534		if (ret != -EBUSY)
4535			return;
4536		commit_tid = 0;
4537		read_lock(&journal->j_state_lock);
4538		if (journal->j_committing_transaction)
4539			commit_tid = journal->j_committing_transaction->t_tid;
4540		read_unlock(&journal->j_state_lock);
4541		if (commit_tid)
4542			jbd2_log_wait_commit(journal, commit_tid);
4543	}
4544}
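/*
 * Illustrative example (a sketch, assuming a 1 KiB block size and 4 KiB
 * pages): i_size = 10000 gives offset = 10000 & 4095 = 1808, which is
 * not above PAGE_CACHE_SIZE - blocksize = 3072, so the buffers of the
 * last page beyond offset 1808 must be invalidated, retrying across any
 * committing transaction.
 */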
4545
4546/*
4547 * ext4_setattr()
4548 *
4549 * Called from notify_change.
4550 *
4551 * We want to trap VFS attempts to truncate the file as soon as
4552 * possible.  In particular, we want to make sure that when the VFS
4553 * shrinks i_size, we put the inode on the orphan list and modify
4554 * i_disksize immediately, so that during the subsequent flushing of
4555 * dirty pages and freeing of disk blocks, we can guarantee that any
4556 * commit will leave the blocks being flushed in an unused state on
4557 * disk.  (On recovery, the inode will get truncated and the blocks will
4558 * be freed, so we have a strong guarantee that no future commit will
4559 * leave these blocks visible to the user.)
4560 *
4561 * Another thing we have to assure is that if we are in ordered mode
4562 * and the inode is still attached to the committing transaction, we
4563 * must start writeout of all the dirty pages which are being truncated.
4564 * This way we are sure that all the data written in the previous
4565 * transaction are already on disk (truncate waits for pages under
4566 * writeback).
4567 *
4568 * Called with inode->i_mutex down.
4569 */
4570int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4571{
4572	struct inode *inode = dentry->d_inode;
4573	int error, rc = 0;
4574	int orphan = 0;
4575	const unsigned int ia_valid = attr->ia_valid;
4576
4577	error = inode_change_ok(inode, attr);
4578	if (error)
4579		return error;
4580
4581	if (is_quota_modification(inode, attr))
4582		dquot_initialize(inode);
4583	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4584	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4585		handle_t *handle;
4586
4587		/* (user+group)*(old+new) structure, inode write (sb,
4588		 * inode block, ? - but truncate inode update has it) */
4589		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
4590			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
4591			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
4592		if (IS_ERR(handle)) {
4593			error = PTR_ERR(handle);
4594			goto err_out;
4595		}
4596		error = dquot_transfer(inode, attr);
4597		if (error) {
4598			ext4_journal_stop(handle);
4599			return error;
4600		}
4601		/* Update corresponding info in inode so that everything is in
4602		 * one transaction */
4603		if (attr->ia_valid & ATTR_UID)
4604			inode->i_uid = attr->ia_uid;
4605		if (attr->ia_valid & ATTR_GID)
4606			inode->i_gid = attr->ia_gid;
4607		error = ext4_mark_inode_dirty(handle, inode);
4608		ext4_journal_stop(handle);
4609	}
4610
4611	if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
4612		handle_t *handle;
4613
4614		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4615			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4616
4617			if (attr->ia_size > sbi->s_bitmap_maxbytes)
4618				return -EFBIG;
4619		}
4620
4621		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
4622			inode_inc_iversion(inode);
4623
4624		if (S_ISREG(inode->i_mode) &&
4625		    (attr->ia_size < inode->i_size)) {
4626			if (ext4_should_order_data(inode)) {
4627				error = ext4_begin_ordered_truncate(inode,
4628							    attr->ia_size);
4629				if (error)
4630					goto err_out;
4631			}
4632			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4633			if (IS_ERR(handle)) {
4634				error = PTR_ERR(handle);
4635				goto err_out;
4636			}
4637			if (ext4_handle_valid(handle)) {
4638				error = ext4_orphan_add(handle, inode);
4639				orphan = 1;
4640			}
4641			down_write(&EXT4_I(inode)->i_data_sem);
4642			EXT4_I(inode)->i_disksize = attr->ia_size;
4643			rc = ext4_mark_inode_dirty(handle, inode);
4644			if (!error)
4645				error = rc;
4646			/*
4647			 * We have to update i_size under i_data_sem together
4648			 * with i_disksize to avoid races with writeback code
4649			 * running ext4_wb_update_i_disksize().
4650			 */
4651			if (!error)
4652				i_size_write(inode, attr->ia_size);
4653			up_write(&EXT4_I(inode)->i_data_sem);
4654			ext4_journal_stop(handle);
4655			if (error) {
4656				ext4_orphan_del(NULL, inode);
4657				goto err_out;
4658			}
4659		} else
4660			i_size_write(inode, attr->ia_size);
4661
4662		/*
4663		 * Blocks are going to be removed from the inode. Wait
4664		 * for dio in flight.  Temporarily disable
4665		 * dioread_nolock to prevent livelock.
4666		 */
4667		if (orphan) {
4668			if (!ext4_should_journal_data(inode)) {
4669				ext4_inode_block_unlocked_dio(inode);
4670				inode_dio_wait(inode);
4671				ext4_inode_resume_unlocked_dio(inode);
4672			} else
4673				ext4_wait_for_tail_page_commit(inode);
4674		}
4675		/*
4676		 * Truncate pagecache after we've waited for commit
4677		 * in data=journal mode to make pages freeable.
4678		 */
4679		truncate_pagecache(inode, inode->i_size);
4680	}
4681	/*
4682	 * We want to call ext4_truncate() even if attr->ia_size ==
4683	 * inode->i_size for cases like truncation of fallocated space
4684	 */
4685	if (attr->ia_valid & ATTR_SIZE)
4686		ext4_truncate(inode);
4687
4688	if (!rc) {
4689		setattr_copy(inode, attr);
4690		mark_inode_dirty(inode);
4691	}
4692
4693	/*
4694	 * If the call to ext4_truncate failed to get a transaction handle at
4695	 * all, we need to clean up the in-core orphan list manually.
4696	 */
4697	if (orphan && inode->i_nlink)
4698		ext4_orphan_del(NULL, inode);
4699
4700	if (!rc && (ia_valid & ATTR_MODE))
4701		rc = posix_acl_chmod(inode, inode->i_mode);
4702
4703err_out:
4704	ext4_std_error(inode->i_sb, error);
4705	if (!error)
4706		error = rc;
4707	return error;
4708}
4709
4710int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4711		 struct kstat *stat)
4712{
4713	struct inode *inode;
4714	unsigned long long delalloc_blocks;
4715
4716	inode = dentry->d_inode;
4717	generic_fillattr(inode, stat);
4718
4719	/*
4720	 * If there is inline data in the inode, the inode will normally not
4721	 * have data blocks allocated (it may have an external xattr block).
4722	 * Report at least one sector for such files, so tools like tar, rsync
4723	 * and others don't incorrectly think the file is completely sparse.
4724	 */
4725	if (unlikely(ext4_has_inline_data(inode)))
4726		stat->blocks += (stat->size + 511) >> 9;
4727
4728	/*
4729	 * We can't update i_blocks if the block allocation is delayed
4730	 * otherwise in the case of system crash before the real block
4731	 * allocation is done, we will have i_blocks inconsistent with
4732	 * on-disk file blocks.
4733	 * We always keep i_blocks updated together with real
4734	 * allocation. But so as not to confuse userspace, stat
4735	 * will return the blocks that include the delayed allocation
4736	 * blocks for this file.
4737	 */
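	/*
	 * Note: the shift below converts filesystem blocks to 512-byte
	 * sectors; e.g. with 4 KiB blocks (s_blocksize_bits = 12) each
	 * delalloc block adds 1 << (12 - 9) = 8 to stat->blocks.
	 */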
4738	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4739				   EXT4_I(inode)->i_reserved_data_blocks);
4740	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
4741	return 0;
4742}
4743
4744static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
4745				   int pextents)
4746{
4747	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4748		return ext4_ind_trans_blocks(inode, lblocks);
4749	return ext4_ext_index_trans_blocks(inode, pextents);
4750}
4751
4752/*
4753 * Account for index blocks, block group bitmaps and block group
4754 * descriptor blocks if we modify data blocks and index blocks. In the
4755 * worst case, the index blocks are spread over different block groups.
4756 *
4757 * If data blocks are discontiguous, they may be spread over different
4758 * block groups too. Even if they are contiguous, with flexbg they
4759 * could still cross a block group boundary.
4760 *
4761 * Also account for the superblock, inode, quota and xattr blocks.
4762 */
4763static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
4764				  int pextents)
4765{
4766	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4767	int gdpblocks;
4768	int idxblocks;
4769	int ret = 0;
4770
4771	/*
4772	 * How many index blocks do we need to touch to map @lblocks logical
4773	 * blocks to @pextents physical extents?
4774	 */
4775	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
4776
4777	ret = idxblocks;
4778
4779	/*
4780	 * Now let's see how many group bitmaps and group descriptors need
4781	 * to be accounted for.
4782	 */
4783	groups = idxblocks + pextents;
4784	gdpblocks = groups;
4785	if (groups > ngroups)
4786		groups = ngroups;
4787	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4788		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4789
4790	/* bitmaps and block group descriptor blocks */
4791	ret += groups + gdpblocks;
4792
4793	/* Blocks for super block, inode, quota and xattr blocks */
4794	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4795
4796	return ret;
4797}
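/*
 * Illustrative credit arithmetic (a sketch with hypothetical inputs):
 * suppose idxblocks = 3 for some extent-mapped inode with pextents = 1;
 * then groups = 3 + 1 = 4, and with ngroups and s_gdb_count large enough
 * that nothing is clamped, the result is
 *
 *	3 (index) + 4 (bitmaps) + 4 (gdp) + EXT4_META_TRANS_BLOCKS(sb)
 */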
4798
4799/*
4800 * Calculate the total number of credits to reserve to fit
4801 * the modification of a single page into a single transaction,
4802 * which may include multiple chunks of block allocations.
4803 *
4804 * This could be called via ext4_write_begin()
4805 *
4806 * We need to consider the worst case, when
4807 * we allocate one new block per extent.
4808 */
4809int ext4_writepage_trans_blocks(struct inode *inode)
4810{
4811	int bpp = ext4_journal_blocks_per_page(inode);
4812	int ret;
4813
4814	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
4815
4816	/* Account for data blocks for journalled mode */
4817	if (ext4_should_journal_data(inode))
4818		ret += bpp;
4819	return ret;
4820}
4821
4822/*
4823 * Calculate the journal credits for a chunk of data modification.
4824 *
4825 * This is called from DIO, fallocate or whoever calls
4826 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4827 *
4828 * Journal buffers for data blocks are not included here, as DIO
4829 * and fallocate do not need to journal data buffers.
4830 */
4831int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4832{
4833	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4834}
4835
4836/*
4837 * The caller must have previously called ext4_reserve_inode_write().
4838 * Given this, we know that the caller already has write access to iloc->bh.
4839 */
4840int ext4_mark_iloc_dirty(handle_t *handle,
4841			 struct inode *inode, struct ext4_iloc *iloc)
4842{
4843	int err = 0;
4844
4845	if (IS_I_VERSION(inode))
4846		inode_inc_iversion(inode);
4847
4848	/* the do_update_inode consumes one bh->b_count */
4849	get_bh(iloc->bh);
4850
4851	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4852	err = ext4_do_update_inode(handle, inode, iloc);
4853	put_bh(iloc->bh);
4854	return err;
4855}
4856
4857/*
4858 * On success, we end up with an outstanding reference count against
4859 * iloc->bh.  This _must_ be cleaned up later.
4860 */
4861
4862int
4863ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4864			 struct ext4_iloc *iloc)
4865{
4866	int err;
4867
4868	err = ext4_get_inode_loc(inode, iloc);
4869	if (!err) {
4870		BUFFER_TRACE(iloc->bh, "get_write_access");
4871		err = ext4_journal_get_write_access(handle, iloc->bh);
4872		if (err) {
4873			brelse(iloc->bh);
4874			iloc->bh = NULL;
4875		}
4876	}
4877	ext4_std_error(inode->i_sb, err);
4878	return err;
4879}
4880
4881/*
4882 * Expand an inode by new_extra_isize bytes.
4883 * Returns 0 on success or negative error number on failure.
4884 */
4885static int ext4_expand_extra_isize(struct inode *inode,
4886				   unsigned int new_extra_isize,
4887				   struct ext4_iloc iloc,
4888				   handle_t *handle)
4889{
4890	struct ext4_inode *raw_inode;
4891	struct ext4_xattr_ibody_header *header;
4892
4893	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4894		return 0;
4895
4896	raw_inode = ext4_raw_inode(&iloc);
4897
4898	header = IHDR(inode, raw_inode);
4899
4900	/* No extended attributes present */
4901	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4902	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4903		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4904			new_extra_isize);
4905		EXT4_I(inode)->i_extra_isize = new_extra_isize;
4906		return 0;
4907	}
4908
4909	/* try to expand with EAs present */
4910	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4911					  raw_inode, handle);
4912}
4913
4914/*
4915 * What we do here is to mark the in-core inode as clean with respect to inode
4916 * dirtiness (it may still be data-dirty).
4917 * This means that the in-core inode may be reaped by prune_icache
4918 * without having to perform any I/O.  This is a very good thing,
4919 * because *any* task may call prune_icache - even ones which
4920 * have a transaction open against a different journal.
4921 *
4922 * Is this cheating?  Not really.  Sure, we haven't written the
4923 * inode out, but prune_icache isn't a user-visible syncing function.
4924 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4925 * we start and wait on commits.
4926 */
4927int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4928{
4929	struct ext4_iloc iloc;
4930	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4931	static unsigned int mnt_count;
4932	int err, ret;
4933
4934	might_sleep();
4935	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4936	err = ext4_reserve_inode_write(handle, inode, &iloc);
4937	if (ext4_handle_valid(handle) &&
4938	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4939	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4940		/*
4941		 * We need extra buffer credits since we may write into EA block
4942		 * with this same handle. If journal_extend fails, then it will
4943		 * only result in a minor loss of functionality for that inode.
4944		 * If this is felt to be critical, then e2fsck should be run to
4945		 * force a large enough s_min_extra_isize.
4946		 */
4947		if ((jbd2_journal_extend(handle,
4948			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4949			ret = ext4_expand_extra_isize(inode,
4950						      sbi->s_want_extra_isize,
4951						      iloc, handle);
4952			if (ret) {
4953				ext4_set_inode_state(inode,
4954						     EXT4_STATE_NO_EXPAND);
4955				if (mnt_count !=
4956					le16_to_cpu(sbi->s_es->s_mnt_count)) {
4957					ext4_warning(inode->i_sb,
4958					"Unable to expand inode %lu. Delete"
4959					" some EAs or run e2fsck.",
4960					inode->i_ino);
4961					mnt_count =
4962					  le16_to_cpu(sbi->s_es->s_mnt_count);
4963				}
4964			}
4965		}
4966	}
4967	if (!err)
4968		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4969	return err;
4970}
4971
4972/*
4973 * ext4_dirty_inode() is called from __mark_inode_dirty()
4974 *
4975 * We're really interested in the case where a file is being extended.
4976 * i_size has been changed by generic_commit_write() and we thus need
4977 * to include the updated inode in the current transaction.
4978 *
4979 * Also, dquot_alloc_block() will always dirty the inode when blocks
4980 * are allocated to the file.
4981 *
4982 * If the inode is marked synchronous, we don't honour that here - doing
4983 * so would cause a commit on atime updates, which we don't bother doing.
4984 * We handle synchronous inodes at the highest possible level.
4985 */
4986void ext4_dirty_inode(struct inode *inode, int flags)
4987{
4988	handle_t *handle;
4989
4990	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
4991	if (IS_ERR(handle))
4992		goto out;
4993
4994	ext4_mark_inode_dirty(handle, inode);
4995
4996	ext4_journal_stop(handle);
4997out:
4998	return;
4999}
5000
5001#if 0
5002/*
5003 * Bind an inode's backing buffer_head into this transaction, to prevent
5004 * it from being flushed to disk early.  Unlike
5005 * ext4_reserve_inode_write, this leaves behind no bh reference and
5006 * returns no iloc structure, so the caller needs to repeat the iloc
5007 * lookup to mark the inode dirty later.
5008 */
5009static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5010{
5011	struct ext4_iloc iloc;
5012
5013	int err = 0;
5014	if (handle) {
5015		err = ext4_get_inode_loc(inode, &iloc);
5016		if (!err) {
5017			BUFFER_TRACE(iloc.bh, "get_write_access");
5018			err = jbd2_journal_get_write_access(handle, iloc.bh);
5019			if (!err)
5020				err = ext4_handle_dirty_metadata(handle,
5021								 NULL,
5022								 iloc.bh);
5023			brelse(iloc.bh);
5024		}
5025	}
5026	ext4_std_error(inode->i_sb, err);
5027	return err;
5028}
5029#endif
5030
5031int ext4_change_inode_journal_flag(struct inode *inode, int val)
5032{
5033	journal_t *journal;
5034	handle_t *handle;
5035	int err;
5036
5037	/*
5038	 * We have to be very careful here: changing a data block's
5039	 * journaling status dynamically is dangerous.  If we write a
5040	 * data block to the journal, change the status and then delete
5041	 * that block, we risk forgetting to revoke the old log record
5042	 * from the journal and so a subsequent replay can corrupt data.
5043	 * So, first we make sure that the journal is empty and that
5044	 * nobody is changing anything.
5045	 */
5046
5047	journal = EXT4_JOURNAL(inode);
5048	if (!journal)
5049		return 0;
5050	if (is_journal_aborted(journal))
5051		return -EROFS;
5052	/* We have to allocate physical blocks for delalloc blocks
5053	 * before flushing the journal, otherwise delalloc blocks can not
5054	 * be allocated any more. Even worse, a truncate on delalloc blocks
5055	 * could trigger a BUG by flushing delalloc blocks in the journal.
5056	 * There is no delalloc block in non-journal data mode.
5057	 */
5058	if (val && test_opt(inode->i_sb, DELALLOC)) {
5059		err = ext4_alloc_da_blocks(inode);
5060		if (err < 0)
5061			return err;
5062	}
5063
5064	/* Wait for all existing dio workers */
5065	ext4_inode_block_unlocked_dio(inode);
5066	inode_dio_wait(inode);
5067
5068	jbd2_journal_lock_updates(journal);
5069
5070	/*
5071	 * OK, there are no updates running now, and all cached data is
5072	 * synced to disk.  We are now in a completely consistent state
5073	 * which doesn't have anything in the journal, and we know that
5074	 * no filesystem updates are running, so it is safe to modify
5075	 * the inode's in-core data-journaling state flag now.
5076	 */
5077
5078	if (val)
5079		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5080	else {
5081		jbd2_journal_flush(journal);
5082		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5083	}
5084	ext4_set_aops(inode);
5085
5086	jbd2_journal_unlock_updates(journal);
5087	ext4_inode_resume_unlocked_dio(inode);
5088
5089	/* Finally we can mark the inode as dirty. */
5090
5091	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
5092	if (IS_ERR(handle))
5093		return PTR_ERR(handle);
5094
5095	err = ext4_mark_inode_dirty(handle, inode);
5096	ext4_handle_sync(handle);
5097	ext4_journal_stop(handle);
5098	ext4_std_error(inode->i_sb, err);
5099
5100	return err;
5101}
5102
5103static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5104{
5105	return !buffer_mapped(bh);
5106}
5107
5108int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5109{
5110	struct page *page = vmf->page;
5111	loff_t size;
5112	unsigned long len;
5113	int ret;
5114	struct file *file = vma->vm_file;
5115	struct inode *inode = file_inode(file);
5116	struct address_space *mapping = inode->i_mapping;
5117	handle_t *handle;
5118	get_block_t *get_block;
5119	int retries = 0;
5120
5121	sb_start_pagefault(inode->i_sb);
5122	file_update_time(vma->vm_file);
5123	/* Delalloc case is easy... */
5124	if (test_opt(inode->i_sb, DELALLOC) &&
5125	    !ext4_should_journal_data(inode) &&
5126	    !ext4_nonda_switch(inode->i_sb)) {
5127		do {
5128			ret = __block_page_mkwrite(vma, vmf,
5129						   ext4_da_get_block_prep);
5130		} while (ret == -ENOSPC &&
5131		       ext4_should_retry_alloc(inode->i_sb, &retries));
5132		goto out_ret;
5133	}
5134
5135	lock_page(page);
5136	size = i_size_read(inode);
5137	/* Page got truncated from under us? */
5138	if (page->mapping != mapping || page_offset(page) > size) {
5139		unlock_page(page);
5140		ret = VM_FAULT_NOPAGE;
5141		goto out;
5142	}
5143
5144	if (page->index == size >> PAGE_CACHE_SHIFT)
5145		len = size & ~PAGE_CACHE_MASK;
5146	else
5147		len = PAGE_CACHE_SIZE;
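	/*
	 * Illustrative example (a sketch, assuming 4 KiB pages): with
	 * i_size = 10000 the last page has index 2 (10000 >> 12), so a
	 * fault on that page uses len = 10000 & 4095 = 1808; earlier
	 * pages use the full PAGE_CACHE_SIZE.
	 */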
5148	/*
5149	 * Return if we have all the buffers mapped. This avoids the need to do
5150	 * journal_start/journal_stop which can block and take a long time.
5151	 */
5152	if (page_has_buffers(page)) {
5153		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
5154					    0, len, NULL,
5155					    ext4_bh_unmapped)) {
5156			/* Wait so that we don't change page under IO */
5157			wait_for_stable_page(page);
5158			ret = VM_FAULT_LOCKED;
5159			goto out;
5160		}
5161	}
5162	unlock_page(page);
5163	/* OK, we need to fill the hole... */
5164	if (ext4_should_dioread_nolock(inode))
5165		get_block = ext4_get_block_write;
5166	else
5167		get_block = ext4_get_block;
5168retry_alloc:
5169	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
5170				    ext4_writepage_trans_blocks(inode));
5171	if (IS_ERR(handle)) {
5172		ret = VM_FAULT_SIGBUS;
5173		goto out;
5174	}
5175	ret = __block_page_mkwrite(vma, vmf, get_block);
5176	if (!ret && ext4_should_journal_data(inode)) {
5177		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
5178			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
5179			unlock_page(page);
5180			ret = VM_FAULT_SIGBUS;
5181			ext4_journal_stop(handle);
5182			goto out;
5183		}
5184		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
5185	}
5186	ext4_journal_stop(handle);
5187	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
5188		goto retry_alloc;
5189out_ret:
5190	ret = block_page_mkwrite_return(ret);
5191out:
5192	sb_end_pagefault(inode->i_sb);
5193	return ret;
5194}